/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif

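/*
 * DP_RX_TID_SAVE - stores the rx TID in the nbuf priority field so it can
 * be retrieved later in the rx path; compiles to a no-op when
 * ATH_RX_PRI_SAVE is not enabled.
 */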
#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

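/**
 * dp_rx_check_ndi_mdns_fwding() - check whether an mDNS packet received on
 *				   an NDI vdev may be intra-BSS forwarded
 * @ta_peer: transmitter peer handle
 * @nbuf: frame received
 *
 * When DP_RX_DISABLE_NDI_MDNS_FORWARDING is enabled, IPv6 mDNS packets
 * received on an NDI vdev are not forwarded and the mdns_no_fwd counter is
 * incremented; otherwise forwarding is always allowed.
 *
 * Return: false if the packet must not be forwarded, true otherwise
 */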
#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
{
	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
		return false;
	}
	return true;
}
#else
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
{
	return true;
}
#endif
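/**
 * dp_rx_check_ap_bridge() - check whether AP bridging (intra-BSS
 *			     forwarding) is enabled on the vdev
 * @vdev: vdev on which the frame was received
 *
 * Return: true if ap_bridge_enabled is set for the vdev
 */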
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}

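/**
 * dp_rx_dump_info_and_assert() - dump RX ring and rx descriptor info
 * @soc: DP SOC handle
 * @hal_ring_hdl: HAL ring handle on which the error was seen
 * @ring_desc: ring descriptor that triggered the dump
 * @rx_desc: rx descriptor obtained from the ring descriptor cookie
 *
 * With DUP_RX_DESC_WAR enabled only the ring descriptor and rx descriptor
 * are dumped; otherwise the whole ring is dumped as well and the driver
 * asserts.
 *
 * Return: None
 */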
#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
	qdf_assert_always(0);
}
#endif

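/**
 * dp_rx_desc_sanity() - sanity check on the rx descriptor obtained from
 *			 the REO ring descriptor
 * @soc: DP SOC handle
 * @hal_soc: HAL SOC handle
 * @hal_ring_hdl: HAL ring handle
 * @ring_desc: REO ring descriptor being processed
 * @rx_desc: rx descriptor looked up from the ring descriptor cookie
 *
 * With RX_DESC_SANITY_WAR enabled, this validates that the rx descriptor
 * is non-NULL and that the return buffer manager in the ring descriptor is
 * one of the SW RBMs; on failure the invalid_cookie counter is incremented
 * and the ring descriptor is dumped. Without the WAR this is a no-op.
 *
 * Return: QDF_STATUS_SUCCESS if sane, QDF_STATUS_E_NULL_VALUE otherwise
 */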
#ifdef RX_DESC_SANITY_WAR
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	uint8_t return_buffer_manager;

	if (qdf_unlikely(!rx_desc)) {
		/*
		 * This is an unlikely case where the cookie obtained
		 * from the ring_desc is invalid and hence we are not
		 * able to find the corresponding rx_desc
		 */
		goto fail;
	}

	return_buffer_manager = hal_rx_ret_buf_manager_get(ring_desc);
	if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM ||
			   return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) {
		goto fail;
	}

	return QDF_STATUS_SUCCESS;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	dp_err("Ring Desc:");
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
				ring_desc);
	return QDF_STATUS_E_NULL_VALUE;
}
#else
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * dp_rx_buffers_replenish() - replenish the rxdma ring with rx nbufs,
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: QDF_STATUS_SUCCESS on success, failure code otherwise
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	uint16_t buf_size = rx_desc_pool->buf_size;
	uint8_t buf_alignment = rx_desc_pool->buf_alignment;

	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

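	/*
	 * If no descriptor list was passed in and the ring is more than
	 * 3/4 empty, opportunistically fill all available entries;
	 * otherwise cap the request to the number of available entries
	 * and free the remainder back to the descriptor pool.
	 */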
	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   buf_size,
					   RX_BUFFER_RESERVATION,
					   buf_alignment,
					   FALSE);

		if (qdf_unlikely(!rx_netbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_FROM_DEVICE);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000, then free the nbuf and try
		 * allocating a new nbuf. We can try for 100 times.
		 * This is a temp WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, rx_desc_pool);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;

		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
				 (unsigned long long)paddr,
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 count, num_desc_to_free);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode packets and hand them over
 *			 to the RAW mode simulation for
 *			 decapsulation
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the packet was received
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, peer->mac_addr.raw);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_VDEV_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

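/*
 * dp_rx_da_learn() - no-op stub for DA-based AST learning, used when
 * FEATURE_WDS is not enabled; the WDS build provides the real
 * implementation.
 */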
#ifndef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif
/*
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata parsed from the rx tlvs
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *ta_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf,
		   struct hal_rx_msdu_metadata msdu_metadata)
{
	uint16_t len;
	uint8_t is_frag;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
					tid_stats.tid_rx_stats[ring_id][tid];

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {

		ast_entry = soc->ast_table[msdu_metadata.da_idx];
		if (!ast_entry)
			return false;

		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;
		/* TA peer cannot be same as peer(DA) on which AST is present
		 * this indicates a change in topology and that AST entries
		 * are yet to be updated.
		 */
		if (da_peer == ta_peer)
			return false;

		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			is_frag = qdf_nbuf_is_frag(nbuf);
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(is_frag)) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(ta_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					tid_stats->fail_cnt[INTRABSS_DROP]++;
					return true;
				}
			}

			if (!dp_tx_send((struct cdp_soc_t *)soc,
					ta_peer->vdev->vdev_id, nbuf)) {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
						 len);
				return true;
			} else {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
						 len);
				tid_stats->fail_cnt[INTRABSS_DROP]++;
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (e.g. ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and the original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
			       !ta_peer->bss_peer))) {
		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
			goto end;

		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto end;

		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));

		/* Set cb->ftype to intrabss FWD */
		qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
		if (dp_tx_send((struct cdp_soc_t *)soc,
			       ta_peer->vdev->vdev_id, nbuf_copy)) {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
			tid_stats->intrabss_cnt++;
		}
	}

end:
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats, fills the
 * required stats and stores the memory address in the skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (!rx_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);
}

/**
 * dp_rx_filter_mesh_packets() - Filters out unwanted mesh packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out
 * category and drops the packet if it matches.
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;
	struct dp_soc *soc = vdev->pdev->soc;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						   rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
						   rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * Return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Drop decapped frames");
		goto free;
	}

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV %s", !pdev ? "not found" : "down");
		goto free;
	}

	if (pdev->filter_neighbour_peers) {
		/* Next Hop scenario not yet handled */
		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
		if (vdev) {
			dp_rx_mon_deliver(soc, pdev->pdev_id,
					  pdev->invalid_peer_head_msdu,
					  pdev->invalid_peer_tail_msdu);

			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;

			return 0;
		}
	}

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			goto out;
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
				pdev->pdev_id, &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: void
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
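/**
 * dp_rx_process_invalid_peer(): handle frames for which no peer is found,
 * variant used when FEATURE_NAC_RSSI is not enabled
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: integer type
 */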
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto free;
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
			goto out;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* reset the head and tail pointers */
	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (pdev) {
		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

Ishank Jain | 9f174c6 | 2017-03-30 18:37:42 +0530 | [diff] [blame] | 964 | return 0; |
| 965 | } |
chenguo | 91c9010 | 2017-12-12 16:16:37 +0800 | [diff] [blame] | 966 | |
| 967 | void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, |
Keyur Parekh | b8149a5 | 2019-04-16 21:30:25 -0700 | [diff] [blame] | 968 | qdf_nbuf_t mpdu, bool mpdu_done, |
| 969 | uint8_t mac_id) |
chenguo | 91c9010 | 2017-12-12 16:16:37 +0800 | [diff] [blame] | 970 | { |
chenguo | 91c9010 | 2017-12-12 16:16:37 +0800 | [diff] [blame] | 971 | /* Process the nbuf */ |
Keyur Parekh | b8149a5 | 2019-04-16 21:30:25 -0700 | [diff] [blame] | 972 | dp_rx_process_invalid_peer(soc, mpdu, mac_id); |
chenguo | 91c9010 | 2017-12-12 16:16:37 +0800 | [diff] [blame] | 973 | } |
Ishank Jain | 9f174c6 | 2017-03-30 18:37:42 +0530 | [diff] [blame] | 974 | #endif |
| 975 | |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 976 | #ifdef RECEIVE_OFFLOAD |
| 977 | /** |
| 978 | * dp_rx_print_offload_info() - Print offload info from RX TLV |
Venkata Sharath Chandra Manchala | 5c5d409 | 2019-09-25 13:31:51 -0700 | [diff] [blame] | 979 | * @soc: dp soc handle |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 980 | * @rx_tlv: RX TLV for which offload information is to be printed |
| 981 | * |
| 982 | * Return: None |
| 983 | */ |
Venkata Sharath Chandra Manchala | 5c5d409 | 2019-09-25 13:31:51 -0700 | [diff] [blame] | 984 | static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv) |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 985 | { |
Krunal Soni | c96a116 | 2019-02-21 11:33:26 -0800 | [diff] [blame] | 986 | dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------"); |
| 987 | dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv)); |
| 988 | dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv)); |
Venkata Sharath Chandra Manchala | 5c5d409 | 2019-09-25 13:31:51 -0700 | [diff] [blame] | 989 | dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc, |
| 990 | rx_tlv)); |
Krunal Soni | c96a116 | 2019-02-21 11:33:26 -0800 | [diff] [blame] | 991 | dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv)); |
| 992 | dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv)); |
| 993 | dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv)); |
| 994 | dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv)); |
| 995 | dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv)); |
| 996 | dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv)); |
| 997 | dp_verbose_debug("---------------------------------------------------------"); |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 998 | } |
| 999 | |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1000 | /** |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 1001 | * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb |
| 1002 | * @soc: DP SOC handle |
| 1003 | * @rx_tlv: RX TLV received for the msdu |
| 1004 | * @msdu: msdu for which GRO info needs to be filled |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 1005 | * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1006 | * |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 1007 | * Return: None |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1008 | */ |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 1009 | static |
| 1010 | void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 1011 | qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1012 | { |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 1013 | if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 1014 | return; |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 1015 | |
| 1016 | /* Filling up RX offload info only for TCP packets */ |
| 1017 | if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv)) |
| 1018 | return; |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1019 | |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 1020 | *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1; |
| 1021 | |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 1022 | QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = |
| 1023 | HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv); |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 1024 | QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = |
| 1025 | HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv); |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 1026 | QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = |
Venkata Sharath Chandra Manchala | 5c5d409 | 2019-09-25 13:31:51 -0700 | [diff] [blame] | 1027 | hal_rx_tlv_get_tcp_chksum(soc->hal_soc, |
| 1028 | rx_tlv); |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 1029 | QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = |
| 1030 | HAL_RX_TLV_GET_TCP_SEQ(rx_tlv); |
| 1031 | QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = |
| 1032 | HAL_RX_TLV_GET_TCP_ACK(rx_tlv); |
| 1033 | QDF_NBUF_CB_RX_TCP_WIN(msdu) = |
| 1034 | HAL_RX_TLV_GET_TCP_WIN(rx_tlv); |
| 1035 | QDF_NBUF_CB_RX_TCP_PROTO(msdu) = |
| 1036 | HAL_RX_TLV_GET_TCP_PROTO(rx_tlv); |
| 1037 | QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = |
| 1038 | HAL_RX_TLV_GET_IPV6(rx_tlv); |
| 1039 | QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = |
| 1040 | HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv); |
Manjunathappa Prakash | 71772a5 | 2017-11-07 18:01:31 -0800 | [diff] [blame] | 1041 | QDF_NBUF_CB_RX_FLOW_ID(msdu) = |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 1042 | HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv); |
Dhanashri Atre | 991ee4d | 2017-05-03 19:03:10 -0700 | [diff] [blame] | 1043 | |
Venkata Sharath Chandra Manchala | 5c5d409 | 2019-09-25 13:31:51 -0700 | [diff] [blame] | 1044 | dp_rx_print_offload_info(soc, rx_tlv); |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1045 | } |
| 1046 | #else |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 1047 | static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 1048 | qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1049 | { |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1050 | } |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 1051 | #endif /* RECEIVE_OFFLOAD */ |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1052 | |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1053 | /** |
| 1054 | * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf. |
| 1055 | * |
| 1056 | * @nbuf: pointer to msdu. |
| 1057 | * @mpdu_len: mpdu length |
| 1058 | * |
| 1059 | * Return: returns true if nbuf is last msdu of mpdu else returns false. |
| 1060 | */ |
| 1061 | static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len) |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1062 | { |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1063 | bool last_nbuf; |
| 1064 | |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 1065 | if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) { |
| 1066 | qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE); |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1067 | last_nbuf = false; |
| 1068 | } else { |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1069 | qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN)); |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1070 | last_nbuf = true; |
| 1071 | } |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1072 | |
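| | 	/* Note: on the last nbuf this subtraction runs past zero (mpdu_len is |
| | 	 * unsigned); callers stop consulting *mpdu_len once last_nbuf is true. |
| | 	 */ |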
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 1073 | *mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN); |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1074 | |
| 1075 | return last_nbuf; |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1076 | } |
| 1077 | |
| 1078 | /** |
| 1079 | * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across |
| 1080 | * multiple nbufs. |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1081 | * @nbuf: pointer to the first msdu of an amsdu. |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1082 | * |
| 1083 | * This function implements the creation of RX frag_list for cases |
| 1084 | * where an MSDU is spread across multiple nbufs. |
| 1085 | * |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1086 | * Return: returns the head nbuf which contains complete frag_list. |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1087 | */ |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 1088 | qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf) |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1089 | { |
Saket Jha | 6ef0340 | 2019-12-17 17:03:27 -0800 | [diff] [blame] | 1090 | qdf_nbuf_t parent, frag_list, next = NULL; |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1091 | uint16_t frag_list_len = 0; |
| 1092 | uint16_t mpdu_len; |
| 1093 | bool last_nbuf; |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1094 | |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 1095 | /* |
| 1096 | * Use the msdu len from the REO entry descriptor instead, since |
| 1097 | * there are cases where the RX PKT TLV is corrupted while the |
| 1098 | * msdu_len from the REO descriptor is right for non-raw RX scatter msdus. |
| 1099 | */ |
| 1100 | mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1101 | /* |
| 1102 | * this is a case where the complete msdu fits in one single nbuf. |
| 1103 | * in this case HW sets both start and end bit and we only need to |
| 1104 | * reset these bits for RAW mode simulator to decap the pkt |
| 1105 | */ |
| 1106 | if (qdf_nbuf_is_rx_chfrag_start(nbuf) && |
| 1107 | qdf_nbuf_is_rx_chfrag_end(nbuf)) { |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 1108 | qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN); |
| 1109 | qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1110 | return nbuf; |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1111 | } |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1112 | |
| 1113 | /* |
| 1114 | * This is a case where we have multiple msdus (A-MSDU) spread across |
| 1115 | * multiple nbufs. here we create a fraglist out of these nbufs. |
| 1116 | * |
| 1117 | * the moment we encounter a nbuf with continuation bit set we |
| 1118 | * know for sure we have an MSDU which is spread across multiple |
| 1119 | * nbufs. We loop through and reap nbufs till we reach last nbuf. |
| 1120 | */ |
| 1121 | parent = nbuf; |
| 1122 | frag_list = nbuf->next; |
| 1123 | nbuf = nbuf->next; |
| 1124 | |
| 1125 | /* |
| 1126 | * set the start bit in the first nbuf we encounter with continuation |
| 1127 | * bit set. This has the proper mpdu length set as it is the first |
| 1128 | * msdu of the mpdu. this becomes the parent nbuf and the subsequent |
| 1129 | * nbufs will form the frag_list of the parent nbuf. |
| 1130 | */ |
| 1131 | qdf_nbuf_set_rx_chfrag_start(parent, 1); |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1132 | last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len); |
| 1133 | |
| 1134 | /* |
| 1135 | * this is where we set the length of the fragments which are |
| 1136 | * associated to the parent nbuf. We iterate through the frag_list |
| 1137 | * till we hit the last_nbuf of the list. |
| 1138 | */ |
| 1139 | do { |
| 1140 | last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len); |
| 1141 | qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); |
| 1142 | frag_list_len += qdf_nbuf_len(nbuf); |
| 1143 | |
| 1144 | if (last_nbuf) { |
| 1145 | next = nbuf->next; |
| 1146 | nbuf->next = NULL; |
| 1147 | break; |
| 1148 | } |
| 1149 | |
| 1150 | nbuf = nbuf->next; |
| 1151 | } while (!last_nbuf); |
| 1152 | |
| 1153 | qdf_nbuf_set_rx_chfrag_start(nbuf, 0); |
| 1154 | qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len); |
| 1155 | parent->next = next; |
| 1156 | |
| 1157 | qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN); |
| 1158 | return parent; |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 1159 | } |
| 1160 | |
Varsha Mishra | a331e6e | 2019-03-11 12:16:14 +0530 | [diff] [blame] | 1161 | /** |
| 1162 | * dp_rx_compute_delay() - Compute reap-to-stack and inter-frame delay |
| 1163 | *			 stats for an rx packet |
| 1164 | * |
| 1165 | * @vdev: vdev handle |
| 1166 | * @nbuf: rx packet buffer |
| 1168 | * Return: none |
| 1169 | */ |
| 1170 | void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf) |
| 1171 | { |
Varsha Mishra | 1f4cfb6 | 2019-05-31 00:59:15 +0530 | [diff] [blame] | 1172 | uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); |
Varsha Mishra | a331e6e | 2019-03-11 12:16:14 +0530 | [diff] [blame] | 1173 | int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get()); |
| 1174 | uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); |
Ankit Kumar | e222775 | 2019-04-30 00:16:04 +0530 | [diff] [blame] | 1175 | uint8_t tid = qdf_nbuf_get_tid_val(nbuf); |
Varsha Mishra | a331e6e | 2019-03-11 12:16:14 +0530 | [diff] [blame] | 1176 | uint32_t interframe_delay = |
| 1177 | (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp); |
| 1178 | |
| 1179 | dp_update_delay_stats(vdev->pdev, to_stack, tid, |
Varsha Mishra | 1f4cfb6 | 2019-05-31 00:59:15 +0530 | [diff] [blame] | 1180 | CDP_DELAY_STATS_REAP_STACK, ring_id); |
Varsha Mishra | a331e6e | 2019-03-11 12:16:14 +0530 | [diff] [blame] | 1181 | /* |
| 1182 | * Update interframe delay stats calculated at deliver_data_ol point. |
| 1183 | * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so |
| 1184 | * interframe delay will not be calculated correctly for the 1st frame. |
| 1185 | * On the other hand, this helps in avoiding an extra per packet check |
| 1186 | * of vdev->prev_rx_deliver_tstamp. |
| 1187 | */ |
| 1188 | dp_update_delay_stats(vdev->pdev, interframe_delay, tid, |
Varsha Mishra | 1f4cfb6 | 2019-05-31 00:59:15 +0530 | [diff] [blame] | 1189 | CDP_DELAY_STATS_RX_INTERFRAME, ring_id); |
Varsha Mishra | a331e6e | 2019-03-11 12:16:14 +0530 | [diff] [blame] | 1190 | vdev->prev_rx_deliver_tstamp = current_ts; |
| 1191 | } |
| 1192 | |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 1193 | /** |
| 1194 | * dp_rx_drop_nbuf_list() - drop an nbuf list |
| 1195 | * @pdev: dp pdev reference |
| 1196 | * @buf_list: buffer list to be dropped |
| 1197 | * |
| 1198 | * Return: int (number of bufs dropped) |
| 1199 | */ |
| 1200 | static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, |
| 1201 | qdf_nbuf_t buf_list) |
| 1202 | { |
| 1203 | struct cdp_tid_rx_stats *stats = NULL; |
Varsha Mishra | 1f4cfb6 | 2019-05-31 00:59:15 +0530 | [diff] [blame] | 1204 | uint8_t tid = 0, ring_id = 0; |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 1205 | int num_dropped = 0; |
| 1206 | qdf_nbuf_t buf, next_buf; |
| 1207 | |
| 1208 | buf = buf_list; |
| 1209 | while (buf) { |
Varsha Mishra | 1f4cfb6 | 2019-05-31 00:59:15 +0530 | [diff] [blame] | 1210 | ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 1211 | next_buf = qdf_nbuf_queue_next(buf); |
Ankit Kumar | e222775 | 2019-04-30 00:16:04 +0530 | [diff] [blame] | 1212 | tid = qdf_nbuf_get_tid_val(buf); |
Rakesh Pillai | c1aeb35 | 2020-01-14 13:06:15 +0530 | [diff] [blame] | 1213 | if (qdf_likely(pdev)) { |
| 1214 | stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; |
| 1215 | stats->fail_cnt[INVALID_PEER_VDEV]++; |
| 1216 | stats->delivered_to_stack--; |
| 1217 | } |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 1218 | qdf_nbuf_free(buf); |
| 1219 | buf = next_buf; |
| 1220 | num_dropped++; |
| 1221 | } |
| 1222 | |
| 1223 | return num_dropped; |
| 1224 | } |
| 1225 | |
| 1226 | #ifdef PEER_CACHE_RX_PKTS |
| 1227 | /** |
| 1228 | * dp_rx_flush_rx_cached() - flush cached rx frames |
| 1229 | * @peer: peer |
| 1230 | * @drop: flag to drop frames or forward to net stack |
| 1231 | * |
| 1232 | * Return: None |
| 1233 | */ |
| 1234 | void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) |
| 1235 | { |
| 1236 | struct dp_peer_cached_bufq *bufqi; |
| 1237 | struct dp_rx_cached_buf *cache_buf = NULL; |
| 1238 | ol_txrx_rx_fp data_rx = NULL; |
| 1239 | int num_buff_elem; |
| 1240 | QDF_STATUS status; |
| 1241 | |
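| | |
| | 	/* A prior non-zero count means another context is already flushing |
| | 	 * this peer's cached queue; back out and let that flush drain it. |
| | 	 */ |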
| 1242 | if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) { |
| 1243 | qdf_atomic_dec(&peer->flush_in_progress); |
| 1244 | return; |
| 1245 | } |
| 1246 | |
| 1247 | qdf_spin_lock_bh(&peer->peer_info_lock); |
| 1248 | if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) |
| 1249 | data_rx = peer->vdev->osif_rx; |
| 1250 | else |
| 1251 | drop = true; |
| 1252 | qdf_spin_unlock_bh(&peer->peer_info_lock); |
| 1253 | |
| 1254 | bufqi = &peer->bufq_info; |
| 1255 | |
| 1256 | qdf_spin_lock_bh(&bufqi->bufq_lock); |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 1257 | qdf_list_remove_front(&bufqi->cached_bufq, |
| 1258 | (qdf_list_node_t **)&cache_buf); |
| 1259 | while (cache_buf) { |
| 1260 | num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( |
| 1261 | cache_buf->buf); |
| 1262 | bufqi->entries -= num_buff_elem; |
| 1263 | qdf_spin_unlock_bh(&bufqi->bufq_lock); |
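| | 		/* bufq_lock is dropped above so the cached frames are freed or |
| | 		 * handed to the OSIF rx callback without the spinlock held. |
| | 		 */ |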
| 1264 | if (drop) { |
| 1265 | bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, |
| 1266 | cache_buf->buf); |
| 1267 | } else { |
| 1268 | /* Flush the cached frames to OSIF DEV */ |
| 1269 | status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); |
| 1270 | if (status != QDF_STATUS_SUCCESS) |
| 1271 | bufqi->dropped = dp_rx_drop_nbuf_list( |
| 1272 | peer->vdev->pdev, |
| 1273 | cache_buf->buf); |
| 1274 | } |
| 1275 | qdf_mem_free(cache_buf); |
| 1276 | cache_buf = NULL; |
| 1277 | qdf_spin_lock_bh(&bufqi->bufq_lock); |
| 1278 | qdf_list_remove_front(&bufqi->cached_bufq, |
| 1279 | (qdf_list_node_t **)&cache_buf); |
| 1280 | } |
| 1281 | qdf_spin_unlock_bh(&bufqi->bufq_lock); |
| 1282 | qdf_atomic_dec(&peer->flush_in_progress); |
| 1283 | } |
| 1284 | |
| 1285 | /** |
| 1286 | * dp_rx_enqueue_rx() - cache rx frames |
| 1287 | * @peer: peer |
| 1288 | * @rx_buf_list: cache buffer list |
| 1289 | * |
| 1290 | * Return: None |
| 1291 | */ |
| 1292 | static QDF_STATUS |
| 1293 | dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list) |
| 1294 | { |
| 1295 | struct dp_rx_cached_buf *cache_buf; |
| 1296 | struct dp_peer_cached_bufq *bufqi = &peer->bufq_info; |
| 1297 | int num_buff_elem; |
| 1298 | |
Nisha Menon | 4f63366 | 2020-01-21 18:17:28 -0800 | [diff] [blame] | 1299 | dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, |
| 1300 | bufqi->dropped); |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 1301 | if (!peer->valid) { |
| 1302 | bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, |
| 1303 | rx_buf_list); |
| 1304 | return QDF_STATUS_E_INVAL; |
| 1305 | } |
| 1306 | |
| 1307 | qdf_spin_lock_bh(&bufqi->bufq_lock); |
| 1308 | if (bufqi->entries >= bufqi->thresh) { |
| 1309 | bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, |
| 1310 | rx_buf_list); |
| 1311 | qdf_spin_unlock_bh(&bufqi->bufq_lock); |
| 1312 | return QDF_STATUS_E_RESOURCES; |
| 1313 | } |
| 1314 | qdf_spin_unlock_bh(&bufqi->bufq_lock); |
| 1315 | |
| 1316 | num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); |
| 1317 | |
| 1318 | cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); |
| 1319 | if (!cache_buf) { |
| 1320 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, |
| 1321 | "Failed to allocate buf to cache rx frames"); |
| 1322 | bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, |
| 1323 | rx_buf_list); |
| 1324 | return QDF_STATUS_E_NOMEM; |
| 1325 | } |
| 1326 | |
| 1327 | cache_buf->buf = rx_buf_list; |
| 1328 | |
| 1329 | qdf_spin_lock_bh(&bufqi->bufq_lock); |
| 1330 | qdf_list_insert_back(&bufqi->cached_bufq, |
| 1331 | &cache_buf->node); |
| 1332 | bufqi->entries += num_buff_elem; |
| 1333 | qdf_spin_unlock_bh(&bufqi->bufq_lock); |
| 1334 | |
| 1335 | return QDF_STATUS_SUCCESS; |
| 1336 | } |
| 1337 | |
| 1338 | static inline |
| 1339 | bool dp_rx_is_peer_cache_bufq_supported(void) |
| 1340 | { |
| 1341 | return true; |
| 1342 | } |
| 1343 | #else |
| 1344 | static inline |
| 1345 | bool dp_rx_is_peer_cache_bufq_supported(void) |
| 1346 | { |
| 1347 | return false; |
| 1348 | } |
| 1349 | |
| 1350 | static inline QDF_STATUS |
| 1351 | dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list) |
| 1352 | { |
| 1353 | return QDF_STATUS_SUCCESS; |
| 1354 | } |
| 1355 | #endif |
| 1356 | |
Nisha Menon | 4f63366 | 2020-01-21 18:17:28 -0800 | [diff] [blame] | 1357 | void dp_rx_deliver_to_stack(struct dp_soc *soc, |
| 1358 | struct dp_vdev *vdev, |
| 1359 | struct dp_peer *peer, |
| 1360 | qdf_nbuf_t nbuf_head, |
| 1361 | qdf_nbuf_t nbuf_tail) |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 1362 | { |
Rakesh Pillai | c1aeb35 | 2020-01-14 13:06:15 +0530 | [diff] [blame] | 1363 | int num_nbuf = 0; |
| 1364 | |
| 1365 | if (qdf_unlikely(!vdev || vdev->delete.pending)) { |
| 1366 | num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); |
| 1367 | /* |
| 1368 | * This is a special case where vdev is invalid, |
| 1369 | * so we cannot know the pdev to which this packet |
| 1370 | * belonged. Hence we update the soc rx error stats. |
| 1371 | */ |
| 1372 | DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); |
| 1373 | return; |
| 1374 | } |
| 1375 | |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 1376 | /* |
Jeff Johnson | ff2dfb2 | 2018-05-12 10:27:57 -0700 | [diff] [blame] | 1377 | * highly unlikely to have a vdev without a registered rx |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 1378 | * callback function. if so let us free the nbuf_list. |
| 1379 | */ |
| 1380 | if (qdf_unlikely(!vdev->osif_rx)) { |
Nisha Menon | 4f63366 | 2020-01-21 18:17:28 -0800 | [diff] [blame] | 1381 | if (peer && dp_rx_is_peer_cache_bufq_supported()) { |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 1382 | dp_rx_enqueue_rx(peer, nbuf_head); |
Nisha Menon | 4f63366 | 2020-01-21 18:17:28 -0800 | [diff] [blame] | 1383 | } else { |
| 1384 | num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, |
| 1385 | nbuf_head); |
| 1386 | DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf); |
| 1387 | } |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 1388 | return; |
| 1389 | } |
| 1390 | |
| 1391 | if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1392 | (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { |
| 1393 | vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, |
Pavankumar Nandeshwar | 0ce3870 | 2019-09-30 18:43:03 +0530 | [diff] [blame] | 1394 | &nbuf_tail, peer->mac_addr.raw); |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 1395 | } |
Manjunathappa Prakash | 5d73e07 | 2020-01-08 16:50:25 -0800 | [diff] [blame] | 1396 | |
| 1397 | /* Function pointer initialized only when FISA is enabled */ |
| 1398 | if (vdev->osif_fisa_rx) |
| 1399 | /* on failure send it via regular path */ |
| 1400 | vdev->osif_fisa_rx(soc, vdev, nbuf_head); |
| 1401 | else |
| 1402 | vdev->osif_rx(vdev->osif_vdev, nbuf_head); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 1403 | } |
| 1404 | |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1405 | /** |
| 1406 | * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware. |
| | * @pdev: pdev handle |
| 1407 | * @nbuf: pointer to the first msdu of an amsdu. |
| 1408 | * @rx_tlv_hdr: pointer to the start of RX TLV headers. |
| 1409 | * |
| 1410 | * The ip_summed field of the skb is set based on whether HW validated the |
| 1411 | * IP/TCP/UDP checksum. |
| 1412 | * |
| 1413 | * Return: void |
| 1414 | */ |
Tallapragada Kalyan | 51198fc | 2018-04-18 14:30:44 +0530 | [diff] [blame] | 1415 | static inline void dp_rx_cksum_offload(struct dp_pdev *pdev, |
| 1416 | qdf_nbuf_t nbuf, |
| 1417 | uint8_t *rx_tlv_hdr) |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1418 | { |
| 1419 | qdf_nbuf_rx_cksum_t cksum = {0}; |
Tallapragada Kalyan | 51198fc | 2018-04-18 14:30:44 +0530 | [diff] [blame] | 1420 | bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr); |
| 1421 | bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1422 | |
Tallapragada Kalyan | 51198fc | 2018-04-18 14:30:44 +0530 | [diff] [blame] | 1423 | if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) { |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1424 | cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1425 | qdf_nbuf_set_rx_cksum(nbuf, &cksum); |
Tallapragada Kalyan | 51198fc | 2018-04-18 14:30:44 +0530 | [diff] [blame] | 1426 | } else { |
| 1427 | DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err); |
| 1428 | DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1429 | } |
| 1430 | } |
| 1431 | |
Adil Saeed Musthafa | bbc4de0 | 2019-12-12 14:34:44 -0800 | [diff] [blame] | 1432 | #ifdef VDEV_PEER_PROTOCOL_COUNT |
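| | /* Update per-peer protocol counters for this rx msdu. The update is skipped |
| |  * when protocol-count tracking is disabled on the vdev, and for fragmented |
| |  * or raw frames. |
| |  */ |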
| 1433 | #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \ |
| 1434 | { \ |
| 1435 | qdf_nbuf_t nbuf_local; \ |
| 1436 | struct dp_peer *peer_local; \ |
| 1437 | struct dp_vdev *vdev_local = vdev_hdl; \ |
| 1438 | do { \ |
| 1439 | if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ |
| 1440 | break; \ |
| 1441 | nbuf_local = nbuf; \ |
| 1442 | peer_local = peer; \ |
| 1443 | if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ |
| 1444 | break; \ |
| 1445 | else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ |
| 1446 | break; \ |
| 1447 | dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ |
| 1448 | (nbuf_local), \ |
| 1449 | (peer_local), 0, 1); \ |
| 1450 | } while (0); \ |
| 1451 | } |
| 1452 | #else |
| 1453 | #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) |
| 1454 | #endif |
| 1455 | |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1456 | /** |
| 1457 | * dp_rx_msdu_stats_update() - update per msdu stats. |
| 1458 | * @soc: core txrx main context |
| 1459 | * @nbuf: pointer to the first msdu of an amsdu. |
| 1460 | * @rx_tlv_hdr: pointer to the start of RX TLV headers. |
| 1461 | * @peer: pointer to the peer object. |
| 1462 | * @ring_id: reo dest ring number on which pkt is reaped. |
Varsha Mishra | 9d42f12 | 2019-05-03 12:47:40 +0530 | [diff] [blame] | 1463 | * @tid_stats: per tid rx stats. |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1464 | * |
| 1465 | * update all the per msdu stats for that nbuf. |
| 1466 | * Return: void |
| 1467 | */ |
| 1468 | static void dp_rx_msdu_stats_update(struct dp_soc *soc, |
| 1469 | qdf_nbuf_t nbuf, |
| 1470 | uint8_t *rx_tlv_hdr, |
| 1471 | struct dp_peer *peer, |
Varsha Mishra | 9d42f12 | 2019-05-03 12:47:40 +0530 | [diff] [blame] | 1472 | uint8_t ring_id, |
| 1473 | struct cdp_tid_rx_stats *tid_stats) |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1474 | { |
| 1475 | bool is_ampdu, is_not_amsdu; |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1476 | uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; |
| 1477 | struct dp_vdev *vdev = peer->vdev; |
Srinivas Girigowda | 03bd4b6 | 2019-02-25 10:57:08 -0800 | [diff] [blame] | 1478 | qdf_ether_header_t *eh; |
Ankit Kumar | f90c944 | 2019-05-02 18:55:20 +0530 | [diff] [blame] | 1479 | uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1480 | |
Adil Saeed Musthafa | bbc4de0 | 2019-12-12 14:34:44 -0800 | [diff] [blame] | 1481 | dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1482 | is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & |
| 1483 | qdf_nbuf_is_rx_chfrag_end(nbuf); |
| 1484 | |
Aditya Sathish | 6add3db | 2018-04-10 19:43:34 +0530 | [diff] [blame] | 1485 | DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1486 | DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu); |
| 1487 | DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu); |
Paul Zhang | 1275896 | 2019-08-23 14:52:47 +0800 | [diff] [blame] | 1488 | DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf)); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1489 | |
Varsha Mishra | 9d42f12 | 2019-05-03 12:47:40 +0530 | [diff] [blame] | 1490 | tid_stats->msdu_cnt++; |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 1491 | if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && |
Aditya Sathish | 6add3db | 2018-04-10 19:43:34 +0530 | [diff] [blame] | 1492 | (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { |
Srinivas Girigowda | 03bd4b6 | 2019-02-25 10:57:08 -0800 | [diff] [blame] | 1493 | eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); |
Amir Patel | 3217ade | 2018-09-07 12:21:35 +0530 | [diff] [blame] | 1494 | DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len); |
Varsha Mishra | 9d42f12 | 2019-05-03 12:47:40 +0530 | [diff] [blame] | 1495 | tid_stats->mcast_msdu_cnt++; |
Srinivas Girigowda | 7950297 | 2019-02-11 12:25:12 -0800 | [diff] [blame] | 1496 | if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { |
Aditya Sathish | 6add3db | 2018-04-10 19:43:34 +0530 | [diff] [blame] | 1497 | DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len); |
Varsha Mishra | 9d42f12 | 2019-05-03 12:47:40 +0530 | [diff] [blame] | 1498 | tid_stats->bcast_msdu_cnt++; |
Aditya Sathish | 6add3db | 2018-04-10 19:43:34 +0530 | [diff] [blame] | 1499 | } |
| 1500 | } |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1501 | |
| 1502 | /* |
| 1503 | * currently we can return from here as we have similar stats |
| 1504 | * updated at per ppdu level instead of msdu level |
| 1505 | */ |
| 1506 | if (!soc->process_rx_status) |
| 1507 | return; |
| 1508 | |
| 1509 | is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr); |
| 1510 | DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu); |
| 1511 | DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); |
| 1512 | |
| 1513 | sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr); |
| 1514 | mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr); |
Ankit Kumar | e222775 | 2019-04-30 00:16:04 +0530 | [diff] [blame] | 1515 | tid = qdf_nbuf_get_tid_val(nbuf); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1516 | bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr); |
Balamurugan Mahalingam | d015964 | 2018-07-11 15:02:29 +0530 | [diff] [blame] | 1517 | reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, |
| 1518 | rx_tlv_hdr); |
| 1519 | nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1520 | pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr); |
| 1521 | |
Venkata Sharath Chandra Manchala | faa0d8b | 2018-04-09 14:39:43 -0700 | [diff] [blame] | 1522 | DP_STATS_INC(peer, rx.bw[bw], 1); |
Jinwei Chen | e661127 | 2019-04-22 18:38:51 +0800 | [diff] [blame] | 1523 | /* |
| 1524 | * only if nss > 0 and pkt_type is 11N/AC/AX, |
| 1525 | * then increase index [nss - 1] in array counter. |
| 1526 | */ |
| 1527 | if (nss > 0 && (pkt_type == DOT11_N || |
| 1528 | pkt_type == DOT11_AC || |
| 1529 | pkt_type == DOT11_AX)) |
| 1530 | DP_STATS_INC(peer, rx.nss[nss - 1], 1); |
| 1531 | |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1532 | DP_STATS_INC(peer, rx.sgi_count[sgi], 1); |
| 1533 | DP_STATS_INCC(peer, rx.err.mic_err, 1, |
| 1534 | hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr)); |
| 1535 | DP_STATS_INCC(peer, rx.err.decrypt_err, 1, |
| 1536 | hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr)); |
| 1537 | |
| 1538 | DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1539 | DP_STATS_INC(peer, rx.reception_type[reception_type], 1); |
| 1540 | |
Venkata Sharath Chandra Manchala | d18887e | 2018-10-02 18:18:52 -0700 | [diff] [blame] | 1541 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1542 | ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); |
| 1543 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, |
| 1544 | ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A))); |
Venkata Sharath Chandra Manchala | d18887e | 2018-10-02 18:18:52 -0700 | [diff] [blame] | 1545 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1546 | ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); |
| 1547 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, |
| 1548 | ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B))); |
Venkata Sharath Chandra Manchala | d18887e | 2018-10-02 18:18:52 -0700 | [diff] [blame] | 1549 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1550 | ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); |
| 1551 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, |
| 1552 | ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N))); |
Venkata Sharath Chandra Manchala | d18887e | 2018-10-02 18:18:52 -0700 | [diff] [blame] | 1553 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1554 | ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); |
| 1555 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, |
| 1556 | ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); |
Venkata Sharath Chandra Manchala | d18887e | 2018-10-02 18:18:52 -0700 | [diff] [blame] | 1557 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1558 | ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX))); |
| 1559 | DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, |
phadiman | 6c3432b | 2019-01-09 12:45:28 +0530 | [diff] [blame] | 1560 | ((mcs < MAX_MCS) && (pkt_type == DOT11_AX))); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1561 | |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1562 | if ((soc->process_rx_status) && |
| 1563 | hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) { |
Amir Patel | 756d05e | 2018-10-10 12:35:30 +0530 | [diff] [blame] | 1564 | #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE |
phadiman | 4975730 | 2018-12-18 16:13:59 +0530 | [diff] [blame] | 1565 | if (!vdev->pdev) |
| 1566 | return; |
| 1567 | |
Amir Patel | 756d05e | 2018-10-10 12:35:30 +0530 | [diff] [blame] | 1568 | dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc, |
Tallapragada Kalyan | 9e4b36f | 2019-05-02 13:22:34 +0530 | [diff] [blame] | 1569 | &peer->stats, peer->peer_ids[0], |
Amir Patel | 756d05e | 2018-10-10 12:35:30 +0530 | [diff] [blame] | 1570 | UPDATE_PEER_STATS, |
| 1571 | vdev->pdev->pdev_id); |
| 1572 | #endif |
| 1573 | |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 1574 | } |
| 1575 | } |
| 1576 | |
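| | /** |
| |  * is_sa_da_idx_valid() - check that the SA and DA AST indices reported in |
| |  *			   the RX TLVs fall within the configured AST table size |
| |  * @soc: DP SOC handle |
| |  * @rx_tlv_hdr: pointer to the start of RX TLV headers |
| |  * @nbuf: pointer to the msdu being checked |
| |  * @msdu_info: msdu metadata parsed from the RX TLVs |
| |  * |
| |  * Return: true if the indices are usable, false otherwise |
| |  */ |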
Chaithanya Garrepalli | af34aae | 2019-02-18 20:44:27 +0530 | [diff] [blame] | 1577 | static inline bool is_sa_da_idx_valid(struct dp_soc *soc, |
Akshay Kosigi | 6eef9e3 | 2019-06-24 14:32:18 +0530 | [diff] [blame] | 1578 | uint8_t *rx_tlv_hdr, |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 1579 | qdf_nbuf_t nbuf, |
| 1580 | struct hal_rx_msdu_metadata msdu_info) |
Chaithanya Garrepalli | af34aae | 2019-02-18 20:44:27 +0530 | [diff] [blame] | 1581 | { |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 1582 | if ((qdf_nbuf_is_sa_valid(nbuf) && |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 1583 | (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) || |
Mohit Khanna | 3ba9372 | 2019-06-13 18:32:50 -0700 | [diff] [blame] | 1584 | (!qdf_nbuf_is_da_mcbc(nbuf) && |
| 1585 | qdf_nbuf_is_da_valid(nbuf) && |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 1586 | (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) |
Chaithanya Garrepalli | af34aae | 2019-02-18 20:44:27 +0530 | [diff] [blame] | 1587 | return false; |
| 1588 | |
| 1589 | return true; |
| 1590 | } |
| 1591 | |
Amir Patel | cb99026 | 2019-05-28 15:12:48 +0530 | [diff] [blame] | 1592 | #ifndef WDS_VENDOR_EXTENSION |
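| | /* Without the WDS vendor extension, the rx policy check is a no-op that |
| |  * always admits the frame. |
| |  */ |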
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 1593 | int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, |
| 1594 | struct dp_vdev *vdev, |
| 1595 | struct dp_peer *peer) |
Tallapragada Kalyan | 2a5fc62 | 2017-12-08 21:07:43 +0530 | [diff] [blame] | 1596 | { |
| 1597 | return 1; |
| 1598 | } |
| 1599 | #endif |
| 1600 | |
Manjunathappa Prakash | 8f70862 | 2019-02-20 17:02:59 -0800 | [diff] [blame] | 1601 | #ifdef RX_DESC_DEBUG_CHECK |
| 1602 | /** |
| 1603 | * dp_rx_desc_nbuf_sanity_check() - sanity check to catch REO rx_desc paddr |
| 1604 | * corruption |
| 1605 | * |
| 1606 | * @ring_desc: REO ring descriptor |
| 1607 | * @rx_desc: Rx descriptor |
| 1608 | * |
| 1609 | * Return: NONE |
| 1610 | */ |
Akshay Kosigi | 91c5652 | 2019-07-02 11:49:39 +0530 | [diff] [blame] | 1611 | static inline |
| 1612 | void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc, |
| 1613 | struct dp_rx_desc *rx_desc) |
Manjunathappa Prakash | 8f70862 | 2019-02-20 17:02:59 -0800 | [diff] [blame] | 1614 | { |
| 1615 | struct hal_buf_info hbi; |
| 1616 | |
| 1617 | hal_rx_reo_buf_paddr_get(ring_desc, &hbi); |
| 1618 | /* Sanity check for possible buffer paddr corruption */ |
| 1619 | qdf_assert_always((&hbi)->paddr == |
| 1620 | qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)); |
| 1621 | } |
| 1622 | #else |
Akshay Kosigi | 91c5652 | 2019-07-02 11:49:39 +0530 | [diff] [blame] | 1623 | static inline |
| 1624 | void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc, |
| 1625 | struct dp_rx_desc *rx_desc) |
Manjunathappa Prakash | 8f70862 | 2019-02-20 17:02:59 -0800 | [diff] [blame] | 1626 | { |
| 1627 | } |
| 1628 | #endif |
| 1629 | |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1630 | #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT |
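| | /* dp_rx_reap_loop_pkt_limit_hit() - check whether the number of buffers |
| |  * reaped so far has reached the per-pass limit from the soc cfg, so that a |
| |  * single rx softirq invocation does not run unbounded. |
| |  */ |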
| 1631 | static inline |
| 1632 | bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) |
| 1633 | { |
| 1634 | bool limit_hit = false; |
| 1635 | struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx; |
| 1636 | |
| 1637 | limit_hit = |
| 1638 | (num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false; |
| 1639 | |
| 1640 | if (limit_hit) |
| 1641 | DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1) |
| 1642 | |
| 1643 | return limit_hit; |
| 1644 | } |
| 1645 | |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1646 | static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc) |
| 1647 | { |
| 1648 | return soc->wlan_cfg_ctx->rx_enable_eol_data_check; |
| 1649 | } |
| 1650 | |
| 1651 | #else |
| 1652 | static inline |
| 1653 | bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) |
| 1654 | { |
| 1655 | return false; |
| 1656 | } |
| 1657 | |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1658 | static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc) |
| 1659 | { |
| 1660 | return false; |
| 1661 | } |
| 1662 | |
| 1663 | #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ |
Saket Jha | 7f89014 | 2019-07-10 18:31:36 -0700 | [diff] [blame] | 1664 | |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1665 | /** |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 1666 | * dp_is_special_data() - check is the pkt special like eapol, dhcp, etc |
| 1667 | * |
| 1668 | * @nbuf: pkt skb pointer |
| 1669 | * |
| 1670 | * Return: true if matched, false if not |
| 1671 | */ |
| 1672 | static inline |
| 1673 | bool dp_is_special_data(qdf_nbuf_t nbuf) |
| 1674 | { |
| 1675 | if (qdf_nbuf_is_ipv4_arp_pkt(nbuf) || |
| 1676 | qdf_nbuf_is_ipv4_dhcp_pkt(nbuf) || |
| 1677 | qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || |
| 1678 | qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)) |
| 1679 | return true; |
| 1680 | else |
| 1681 | return false; |
| 1682 | } |
| 1683 | |
| 1684 | #ifdef DP_RX_PKT_NO_PEER_DELIVER |
| 1685 | /** |
| 1686 | * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if |
| 1687 | * no corresponding peer is found |
| 1688 | * @soc: core txrx main context |
| 1689 | * @nbuf: pkt skb pointer |
| 1690 | * |
| 1691 | * This function will try to deliver some RX special frames to stack |
| 1692 | * This function will try to deliver some RX special frames to the stack |
| 1693 | * even if no matching peer is found. For instance, in the LFR case, some |
| 1694 | * eapol data will be sent to the host before peer_map is done. |
| 1695 | * Return: None |
| 1696 | */ |
Rakesh Pillai | f09f0b7 | 2020-03-02 14:09:18 +0530 | [diff] [blame] | 1697 | static |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 1698 | void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) |
| 1699 | { |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 1700 | uint16_t peer_id; |
| 1701 | uint8_t vdev_id; |
| 1702 | struct dp_vdev *vdev; |
| 1703 | uint32_t l2_hdr_offset = 0; |
| 1704 | uint16_t msdu_len = 0; |
| 1705 | uint32_t pkt_len = 0; |
| 1706 | uint8_t *rx_tlv_hdr; |
| 1707 | |
Chaithanya Garrepalli | 52511a1 | 2019-12-12 20:24:40 +0530 | [diff] [blame] | 1708 | peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 1709 | if (peer_id > soc->max_peers) |
| 1710 | goto deliver_fail; |
| 1711 | |
Chaithanya Garrepalli | 52511a1 | 2019-12-12 20:24:40 +0530 | [diff] [blame] | 1712 | vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 1713 | vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); |
Rakesh Pillai | c1aeb35 | 2020-01-14 13:06:15 +0530 | [diff] [blame] | 1714 | if (!vdev || vdev->delete.pending || !vdev->osif_rx) |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 1715 | goto deliver_fail; |
| 1716 | |
| 1717 | rx_tlv_hdr = qdf_nbuf_data(nbuf); |
| 1718 | l2_hdr_offset = |
Venkata Sharath Chandra Manchala | f05b2ae | 2019-09-20 17:25:21 -0700 | [diff] [blame] | 1719 | hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 1720 | |
| 1721 | msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); |
| 1722 | pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; |
| 1723 | |
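| | 	/* For a frag (scatter-gather) nbuf only the TLV header is stripped; |
| | 	 * otherwise the packet length is set first and both the TLVs and the |
| | 	 * L3 header padding are pulled. |
| | 	 */ |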
Mohit Khanna | d273250 | 2019-08-12 01:33:37 -0700 | [diff] [blame] | 1724 | if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { |
| 1725 | qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); |
| 1726 | } else { |
| 1727 | qdf_nbuf_set_pktlen(nbuf, pkt_len); |
| 1728 | qdf_nbuf_pull_head(nbuf, |
| 1729 | RX_PKT_TLVS_LEN + |
| 1730 | l2_hdr_offset); |
| 1731 | } |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 1732 | |
| 1733 | /* only allow special frames */ |
| 1734 | if (!dp_is_special_data(nbuf)) |
| 1735 | goto deliver_fail; |
| 1736 | |
| 1737 | vdev->osif_rx(vdev->osif_vdev, nbuf); |
| 1738 | DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); |
| 1739 | return; |
| 1740 | |
| 1741 | deliver_fail: |
| 1742 | DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, |
| 1743 | QDF_NBUF_CB_RX_PKT_LEN(nbuf)); |
| 1744 | qdf_nbuf_free(nbuf); |
| 1745 | } |
| 1746 | #else |
| 1747 | static inline |
| 1748 | void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) |
| 1749 | { |
| 1750 | DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, |
| 1751 | QDF_NBUF_CB_RX_PKT_LEN(nbuf)); |
| 1752 | qdf_nbuf_free(nbuf); |
| 1753 | } |
| 1754 | #endif |
| 1755 | |
| 1756 | /** |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 1757 | * dp_rx_srng_get_num_pending() - get number of pending entries |
| 1758 | * @hal_soc: hal soc opaque pointer |
| 1759 | * @hal_ring: opaque pointer to the HAL Rx Ring |
| 1760 | * @num_entries: number of entries in the hal_ring. |
| 1761 | * @near_full: pointer to a boolean. This is set if ring is near full. |
| 1762 | * |
| 1763 | * The function returns the number of entries in a destination ring which are |
| 1764 | * yet to be reaped. The function also checks if the ring is near full. |
| 1765 | * If more than half of the ring needs to be reaped, the ring is considered |
| 1766 | * approaching full. |
| 1767 | * The function uses hal_srng_dst_num_valid_locked to get the number of valid |
| 1768 | * entries. It should not be called within an SRNG lock. HW pointer value is |
| 1769 | * synced into cached_hp. |
| 1770 | * |
| 1771 | * Return: Number of pending entries if any |
| 1772 | */ |
| 1773 | static |
| 1774 | uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, |
| 1775 | hal_ring_handle_t hal_ring_hdl, |
| 1776 | uint32_t num_entries, |
| 1777 | bool *near_full) |
| 1778 | { |
| 1779 | uint32_t num_pending = 0; |
| 1780 | |
| 1781 | num_pending = hal_srng_dst_num_valid_locked(hal_soc, |
| 1782 | hal_ring_hdl, |
| 1783 | true); |
| 1784 | |
| 1785 | if (num_entries && (num_pending >= num_entries >> 1)) |
| 1786 | *near_full = true; |
| 1787 | else |
| 1788 | *near_full = false; |
| 1789 | |
| 1790 | return num_pending; |
| 1791 | } |
| 1792 | |
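| | /* dp_rx_skip_tlvs() - strip the RX TLV header and L3 padding from the nbuf. |
| |  * The FISA variant additionally records the L3 padding length in the nbuf |
| |  * control block before stripping, so it remains available to the FISA rx |
| |  * path (assumption based on the WLAN_SUPPORT_RX_FISA guard). |
| |  */ |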
Manjunathappa Prakash | 5d73e07 | 2020-01-08 16:50:25 -0800 | [diff] [blame] | 1793 | #ifdef WLAN_SUPPORT_RX_FISA |
Manjunathappa Prakash | 9ee605c | 2020-02-10 19:35:18 -0800 | [diff] [blame] | 1794 | void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding) |
Manjunathappa Prakash | 5d73e07 | 2020-01-08 16:50:25 -0800 | [diff] [blame] | 1795 | { |
Manjunathappa Prakash | 9ee605c | 2020-02-10 19:35:18 -0800 | [diff] [blame] | 1796 | QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; |
| 1797 | qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN); |
Manjunathappa Prakash | 5d73e07 | 2020-01-08 16:50:25 -0800 | [diff] [blame] | 1798 | } |
Manjunathappa Prakash | 9ee605c | 2020-02-10 19:35:18 -0800 | [diff] [blame] | 1799 | #else |
| 1800 | void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding) |
Manjunathappa Prakash | 5d73e07 | 2020-01-08 16:50:25 -0800 | [diff] [blame] | 1801 | { |
Manjunathappa Prakash | 9ee605c | 2020-02-10 19:35:18 -0800 | [diff] [blame] | 1802 | qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN); |
Manjunathappa Prakash | 5d73e07 | 2020-01-08 16:50:25 -0800 | [diff] [blame] | 1803 | } |
Manjunathappa Prakash | 9ee605c | 2020-02-10 19:35:18 -0800 | [diff] [blame] | 1804 | #endif |
| 1805 | |
Manjunathappa Prakash | 5d73e07 | 2020-01-08 16:50:25 -0800 | [diff] [blame] | 1806 | |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 1807 | /** |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1808 | * dp_rx_process() - Brain of the Rx processing functionality |
| 1809 | * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 1810 | * @int_ctx: per interrupt context |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1811 | * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced |
Mohit Khanna | 7ac554b | 2018-05-24 11:58:13 -0700 | [diff] [blame] | 1812 | * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring. |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1813 | * @quota: No. of units (packets) that can be serviced in one shot. |
| 1814 | * |
| 1815 | * This function implements the core of Rx functionality. This is |
| 1816 | * expected to handle only non-error frames. |
| 1817 | * |
| 1818 | * Return: uint32_t: No. of elements processed |
| 1819 | */ |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1820 | uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 1821 | uint8_t reo_ring_num, uint32_t quota) |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1822 | { |
Akshay Kosigi | 91c5652 | 2019-07-02 11:49:39 +0530 | [diff] [blame] | 1823 | hal_ring_desc_t ring_desc; |
Akshay Kosigi | a870c61 | 2019-07-08 23:10:30 +0530 | [diff] [blame] | 1824 | hal_soc_handle_t hal_soc; |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1825 | struct dp_rx_desc *rx_desc = NULL; |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 1826 | qdf_nbuf_t nbuf, next; |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 1827 | bool near_full; |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1828 | union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT]; |
| 1829 | union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT]; |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 1830 | uint32_t num_pending; |
Chaithanya Garrepalli | 8aaf9b6 | 2018-05-17 15:53:21 +0530 | [diff] [blame] | 1831 | uint32_t rx_bufs_used = 0, rx_buf_cookie; |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 1832 | uint16_t msdu_len = 0; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1833 | uint16_t peer_id; |
Chaithanya Garrepalli | 52511a1 | 2019-12-12 20:24:40 +0530 | [diff] [blame] | 1834 | uint8_t vdev_id; |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1835 | struct dp_peer *peer; |
| 1836 | struct dp_vdev *vdev; |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 1837 | uint32_t pkt_len = 0; |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1838 | struct hal_rx_mpdu_desc_info mpdu_desc_info; |
| 1839 | struct hal_rx_msdu_desc_info msdu_desc_info; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1840 | enum hal_reo_error_status error; |
Tallapragada Kalyan | bb3bbcd | 2017-07-14 12:17:04 +0530 | [diff] [blame] | 1841 | uint32_t peer_mdata; |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 1842 | uint8_t *rx_tlv_hdr; |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1843 | uint32_t rx_bufs_reaped[MAX_PDEV_CNT]; |
Pamidipati, Vijay | 57a435a | 2017-10-17 11:03:39 +0530 | [diff] [blame] | 1844 | uint8_t mac_id = 0; |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 1845 | struct dp_pdev *rx_pdev; |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 1846 | struct dp_srng *dp_rxdma_srng; |
| 1847 | struct rx_desc_pool *rx_desc_pool; |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 1848 | struct dp_soc *soc = int_ctx->soc; |
Pamidipati, Vijay | 57a435a | 2017-10-17 11:03:39 +0530 | [diff] [blame] | 1849 | uint8_t ring_id = 0; |
| 1850 | uint8_t core_id = 0; |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 1851 | struct cdp_tid_rx_stats *tid_stats; |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1852 | qdf_nbuf_t nbuf_head; |
| 1853 | qdf_nbuf_t nbuf_tail; |
| 1854 | qdf_nbuf_t deliver_list_head; |
| 1855 | qdf_nbuf_t deliver_list_tail; |
| 1856 | uint32_t num_rx_bufs_reaped = 0; |
| 1857 | uint32_t intr_id; |
| 1858 | struct hif_opaque_softc *scn; |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1859 | int32_t tid = 0; |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1860 | bool is_prev_msdu_last = true; |
| 1861 | uint32_t num_entries_avail = 0; |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 1862 | uint32_t rx_ol_pkt_cnt = 0; |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 1863 | uint32_t num_entries = 0; |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 1864 | struct hal_rx_msdu_metadata msdu_metadata; |
Rakesh Pillai | 79979d6 | 2020-02-29 20:42:28 +0530 | [diff] [blame] | 1865 | QDF_STATUS status; |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1866 | |
Venkata Sharath Chandra Manchala | a405eb7 | 2017-03-06 14:35:00 -0800 | [diff] [blame] | 1867 | DP_HIST_INIT(); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1868 | |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1869 | qdf_assert_always(soc && hal_ring_hdl); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1870 | hal_soc = soc->hal_soc; |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1871 | qdf_assert_always(hal_soc); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1872 | |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1873 | scn = soc->hif_handle; |
Sravan Kumar Kairam | b96e507 | 2019-08-21 20:59:51 +0530 | [diff] [blame] | 1874 | hif_pm_runtime_mark_dp_rx_busy(scn); |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1875 | intr_id = int_ctx->dp_intr_id; |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 1876 | num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl); |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 1877 | |
| 1878 | more_data: |
| 1879 | /* reset local variables here to be re-used in the function */ |
| 1880 | nbuf_head = NULL; |
| 1881 | nbuf_tail = NULL; |
| 1882 | deliver_list_head = NULL; |
| 1883 | deliver_list_tail = NULL; |
| 1884 | peer = NULL; |
| 1885 | vdev = NULL; |
| 1886 | num_rx_bufs_reaped = 0; |
| 1887 | |
| 1888 | qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped)); |
| 1889 | qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info)); |
| 1890 | qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info)); |
| 1891 | qdf_mem_zero(head, sizeof(head)); |
| 1892 | qdf_mem_zero(tail, sizeof(tail)); |
Yue Ma | 245b47b | 2017-02-21 16:35:31 -0800 | [diff] [blame] | 1893 | |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1894 | if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1895 | |
| 1896 | /* |
| 1897 | * Need API to convert from hal_ring pointer to |
| 1898 | * Ring Type / Ring Id combo |
| 1899 | */ |
Venkata Sharath Chandra Manchala | a405eb7 | 2017-03-06 14:35:00 -0800 | [diff] [blame] | 1900 | DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1901 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1902 | FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1903 | goto done; |
| 1904 | } |
| 1905 | |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 1906 | /* |
| 1907 | * start reaping the buffers from reo ring and queue |
| 1908 | * them in per vdev queue. |
| 1909 | * Process the received pkts in a different per vdev loop. |
| 1910 | */ |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1911 | while (qdf_likely(quota && |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1912 | (ring_desc = hal_srng_dst_peek(hal_soc, |
| 1913 | hal_ring_hdl)))) { |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1914 | |
| 1915 | error = HAL_RX_ERROR_STATUS_GET(ring_desc); |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1916 | ring_id = hal_srng_ring_id_get(hal_ring_hdl); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1917 | |
| 1918 | if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) { |
| 1919 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1920 | FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error); |
Ishank Jain | 57c42a1 | 2017-04-12 10:42:22 +0530 | [diff] [blame] | 1921 | DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1922 | /* Don't know how to deal with this -- assert */ |
| 1923 | qdf_assert(0); |
| 1924 | } |
| 1925 | |
| 1926 | rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); |
| 1927 | |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 1928 | rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); |
Rakesh Pillai | 79979d6 | 2020-02-29 20:42:28 +0530 | [diff] [blame] | 1929 | status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl, |
| 1930 | ring_desc, rx_desc); |
| 1931 | if (QDF_IS_STATUS_ERROR(status)) { |
| 1932 | hal_srng_dst_get_next(hal_soc, hal_ring_hdl); |
| 1933 | continue; |
| 1934 | } |
Tallapragada Kalyan | eff377a | 2019-01-09 19:13:19 +0530 | [diff] [blame] | 1935 | |
Saket Jha | 3aeabaa | 2020-03-03 16:21:12 -0800 | [diff] [blame] | 1936 | dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc); |
| 1937 | |
Tallapragada Kalyan | eff377a | 2019-01-09 19:13:19 +0530 | [diff] [blame] | 1938 | /* |
| 1939 | * This is an unlikely scenario where the host reaps a
| 1940 | * descriptor which it already reaped just a while ago
| 1941 | * but has not yet replenished back to HW.
| 1942 | * In this case the host will dump the last 128 descriptors,
| 1943 | * including the software descriptor rx_desc, and assert.
| 1944 | */ |
Gyanranjan Hazarika | e804726 | 2019-06-05 00:43:38 -0700 | [diff] [blame] | 1945 | |
Tallapragada Kalyan | eff377a | 2019-01-09 19:13:19 +0530 | [diff] [blame] | 1946 | if (qdf_unlikely(!rx_desc->in_use)) { |
| 1947 | DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); |
Manjunathappa Prakash | 5f1b698 | 2019-07-12 12:36:21 -0700 | [diff] [blame] | 1948 | dp_info_rl("Reaping rx_desc not in use!"); |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1949 | dp_rx_dump_info_and_assert(soc, hal_ring_hdl, |
Mohit Khanna | 16cd1b2 | 2019-01-25 10:46:00 -0800 | [diff] [blame] | 1950 | ring_desc, rx_desc); |
Saket Jha | 7f89014 | 2019-07-10 18:31:36 -0700 | [diff] [blame] | 1951 | /* ignore duplicate RX desc and continue to process */ |
Manjunathappa Prakash | 5f1b698 | 2019-07-12 12:36:21 -0700 | [diff] [blame] | 1952 | /* Pop out the descriptor */ |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1953 | hal_srng_dst_get_next(hal_soc, hal_ring_hdl); |
Saket Jha | 7f89014 | 2019-07-10 18:31:36 -0700 | [diff] [blame] | 1954 | continue; |
Mohit Khanna | 16cd1b2 | 2019-01-25 10:46:00 -0800 | [diff] [blame] | 1955 | } |
| 1956 | |
| 1957 | if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) { |
| 1958 | dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie); |
| 1959 | DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1); |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1960 | dp_rx_dump_info_and_assert(soc, hal_ring_hdl, |
Tallapragada Kalyan | eff377a | 2019-01-09 19:13:19 +0530 | [diff] [blame] | 1961 | ring_desc, rx_desc); |
| 1962 | } |
| 1963 | |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 1964 | /* Get MPDU DESC info */ |
| 1965 | hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1966 | |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1967 | /* Get MSDU DESC info */ |
| 1968 | hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info); |
| 1969 | |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 1970 | if (qdf_unlikely(msdu_desc_info.msdu_flags & |
| 1971 | HAL_MSDU_F_MSDU_CONTINUATION)) { |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1972 | /* if the previous msdu had its end bit set, the current
| 1973 | * one starts a new MPDU
| 1974 | */ |
| 1975 | if (is_prev_msdu_last) { |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1976 | /* Get number of entries available in HW ring */ |
| 1977 | num_entries_avail = |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 1978 | hal_srng_dst_num_valid(hal_soc, |
| 1979 | hal_ring_hdl, 1); |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1980 | |
| 1981 | /* For new MPDU check if we can read complete |
| 1982 | * MPDU by comparing the number of buffers |
| 1983 | * available and number of buffers needed to |
| 1984 | * reap this MPDU |
| 1985 | */ |
| 1986 | if (((msdu_desc_info.msdu_len / |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 1987 | (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) + |
| 1988 | 1)) > num_entries_avail) { |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 1989 | DP_STATS_INC( |
| 1990 | soc, |
| 1991 | rx.msdu_scatter_wait_break, |
| 1992 | 1); |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1993 | break; |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 1994 | } |
| 1995 | is_prev_msdu_last = false; |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1996 | } |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 1997 | |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 1998 | } |
| 1999 | |
Jinwei Chen | b0c2305 | 2020-03-02 20:44:28 +0800 | [diff] [blame] | 2000 | /* |
| 2001 | * the unmap is done after the scattered msdu wait/break logic
| 2002 | * above, so that a double skb unmap cannot happen.
| 2003 | */ |
| 2004 | qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf, |
| 2005 | QDF_DMA_FROM_DEVICE); |
| 2006 | rx_desc->unmapped = 1; |
| 2007 | |
| 2008 | core_id = smp_processor_id(); |
| 2009 | DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1); |
| 2010 | |
| 2011 | if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT) |
| 2012 | qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1); |
| 2013 | |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 2014 | if (qdf_unlikely(mpdu_desc_info.mpdu_flags & |
| 2015 | HAL_MPDU_F_RAW_AMPDU)) |
| 2016 | qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1); |
| 2017 | |
| 2018 | if (!is_prev_msdu_last && |
| 2019 | msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) |
| 2020 | is_prev_msdu_last = true; |
| 2021 | |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 2022 | /* Pop out the descriptor*/ |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 2023 | hal_srng_dst_get_next(hal_soc, hal_ring_hdl); |
Chaithanya Garrepalli | d3d99db | 2018-12-19 21:54:03 +0530 | [diff] [blame] | 2024 | |
| 2025 | rx_bufs_reaped[rx_desc->pool_id]++; |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2026 | peer_mdata = mpdu_desc_info.peer_meta_data; |
| 2027 | QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) = |
| 2028 | DP_PEER_METADATA_PEER_ID_GET(peer_mdata); |
Chaithanya Garrepalli | 52511a1 | 2019-12-12 20:24:40 +0530 | [diff] [blame] | 2029 | QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) = |
| 2030 | DP_PEER_METADATA_VDEV_ID_GET(peer_mdata); |
Tallapragada Kalyan | bb3bbcd | 2017-07-14 12:17:04 +0530 | [diff] [blame] | 2031 | |
Kalyan Tallapragada | 277f45e | 2017-01-30 14:25:27 +0530 | [diff] [blame] | 2032 | /* |
| 2033 | * save the first, last and continuation msdu flags in
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2034 | * nbuf->cb, and also save mcbc, is_da_valid, is_sa_valid and
| 2035 | * the msdu length in nbuf->cb. This keeps the info required
| 2036 | * for per pkt processing in the same cache line,
| 2037 | * which helps improve throughput for smaller pkt
| 2038 | * sizes.
Kalyan Tallapragada | 277f45e | 2017-01-30 14:25:27 +0530 | [diff] [blame] | 2039 | */ |
| 2040 | if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU) |
Vivek | de90e59 | 2017-11-30 17:24:18 +0530 | [diff] [blame] | 2041 | qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1); |
Kalyan Tallapragada | 277f45e | 2017-01-30 14:25:27 +0530 | [diff] [blame] | 2042 | |
| 2043 | if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) |
Vivek | de90e59 | 2017-11-30 17:24:18 +0530 | [diff] [blame] | 2044 | qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1); |
Kalyan Tallapragada | 277f45e | 2017-01-30 14:25:27 +0530 | [diff] [blame] | 2045 | |
| 2046 | if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) |
Vivek | de90e59 | 2017-11-30 17:24:18 +0530 | [diff] [blame] | 2047 | qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1); |
Kalyan Tallapragada | 277f45e | 2017-01-30 14:25:27 +0530 | [diff] [blame] | 2048 | |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2049 | if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC) |
| 2050 | qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1); |
| 2051 | |
| 2052 | if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID) |
| 2053 | qdf_nbuf_set_da_valid(rx_desc->nbuf, 1); |
| 2054 | |
| 2055 | if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID) |
| 2056 | qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1); |
| 2057 | |
Ankit Kumar | e222775 | 2019-04-30 00:16:04 +0530 | [diff] [blame] | 2058 | qdf_nbuf_set_tid_val(rx_desc->nbuf, |
| 2059 | HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc)); |
| 2060 | |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2061 | QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len; |
| 2062 | |
Mohit Khanna | 7ac554b | 2018-05-24 11:58:13 -0700 | [diff] [blame] | 2063 | QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num; |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2064 | |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2065 | DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 2066 | |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 2067 | /* |
| 2068 | * if the continuation bit is set, the MSDU is spread
| 2069 | * across multiple buffers; do not decrement quota
| 2070 | * until all buffers of that MSDU have been reaped.
| 2071 | */ |
Vivek | de90e59 | 2017-11-30 17:24:18 +0530 | [diff] [blame] | 2072 | if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) |
Tallapragada Kalyan | 52b45a1 | 2017-05-12 17:36:16 +0530 | [diff] [blame] | 2073 | quota -= 1; |
| 2074 | |
Tallapragada Kalyan | aae8c41 | 2017-02-13 12:00:17 +0530 | [diff] [blame] | 2075 | dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], |
| 2076 | &tail[rx_desc->pool_id], |
| 2077 | rx_desc); |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 2078 | |
| 2079 | num_rx_bufs_reaped++; |
Jinwei Chen | b0c2305 | 2020-03-02 20:44:28 +0800 | [diff] [blame] | 2080 | /* |
| 2081 | * for the scatter case, allow the reap-limit break only
| 2082 | * after the complete msdu has been received.
| 2083 | */ |
| 2084 | if (is_prev_msdu_last && |
| 2085 | dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped)) |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 2086 | break; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2087 | } |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 2088 | done: |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 2089 | dp_srng_access_end(int_ctx, soc, hal_ring_hdl); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2090 | |
Tallapragada Kalyan | aae8c41 | 2017-02-13 12:00:17 +0530 | [diff] [blame] | 2091 | for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { |
| 2092 | /* |
| 2093 | * continue with next mac_id if no pkts were reaped |
| 2094 | * from that pool |
| 2095 | */ |
| 2096 | if (!rx_bufs_reaped[mac_id]) |
| 2097 | continue; |
| 2098 | |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2099 | dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; |
| 2100 | |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 2101 | rx_desc_pool = &soc->rx_desc_buf[mac_id]; |
| 2102 | |
| 2103 | dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, |
| 2104 | rx_desc_pool, rx_bufs_reaped[mac_id], |
Venkata Sharath Chandra Manchala | 16fcceb | 2018-01-03 11:27:15 -0800 | [diff] [blame] | 2105 | &head[mac_id], &tail[mac_id]); |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 2106 | } |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2107 | |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 2108 | dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]); |
psimha | 03f9a79 | 2017-10-17 10:42:58 -0700 | [diff] [blame] | 2109 | /* Peer can be NULL in case of LFR */
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 2110 | if (qdf_likely(peer)) |
psimha | 03f9a79 | 2017-10-17 10:42:58 -0700 | [diff] [blame] | 2111 | vdev = NULL; |
| 2112 | |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 2113 | /* |
| 2114 | * BIG loop in which each nbuf is dequeued from the global queue,
| 2115 | * processed and queued back on a per vdev basis. These nbufs
| 2116 | * are delivered to the stack whenever we run out of nbufs,
| 2117 | * or when a newly dequeued nbuf belongs to a different vdev than
| 2118 | * the previous one. A sketch of this list-append pattern follows the function.
| 2119 | */ |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2120 | nbuf = nbuf_head; |
| 2121 | while (nbuf) { |
| 2122 | next = nbuf->next; |
| 2123 | rx_tlv_hdr = qdf_nbuf_data(nbuf); |
Chaithanya Garrepalli | 52511a1 | 2019-12-12 20:24:40 +0530 | [diff] [blame] | 2124 | vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); |
| 2125 | |
| 2126 | if (deliver_list_head && vdev && (vdev->vdev_id != vdev_id)) { |
Rakesh Pillai | c1aeb35 | 2020-01-14 13:06:15 +0530 | [diff] [blame] | 2127 | dp_rx_deliver_to_stack(soc, vdev, peer, |
| 2128 | deliver_list_head, |
Chaithanya Garrepalli | 52511a1 | 2019-12-12 20:24:40 +0530 | [diff] [blame] | 2129 | deliver_list_tail); |
| 2130 | deliver_list_head = NULL; |
| 2131 | deliver_list_tail = NULL; |
| 2132 | } |
| 2133 | |
Ankit Kumar | e222775 | 2019-04-30 00:16:04 +0530 | [diff] [blame] | 2134 | /* Get TID from struct cb->tid_val, save to tid */ |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 2135 | if (qdf_nbuf_is_rx_chfrag_start(nbuf)) |
Ankit Kumar | e222775 | 2019-04-30 00:16:04 +0530 | [diff] [blame] | 2136 | tid = qdf_nbuf_get_tid_val(nbuf); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2137 | |
Chaithanya Garrepalli | 52511a1 | 2019-12-12 20:24:40 +0530 | [diff] [blame] | 2138 | peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2139 | peer = dp_peer_find_by_id(soc, peer_id); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2140 | |
Mohit Khanna | 163c317 | 2018-06-27 01:34:02 -0700 | [diff] [blame] | 2141 | if (peer) { |
| 2142 | QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false; |
| 2143 | qdf_dp_trace_set_track(nbuf, QDF_RX); |
| 2144 | QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; |
| 2145 | QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) = |
| 2146 | QDF_NBUF_RX_PKT_DATA_TRACK; |
| 2147 | } |
| 2148 | |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 2149 | rx_bufs_used++; |
| 2150 | |
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 2151 | if (qdf_likely(peer)) { |
psimha | 03f9a79 | 2017-10-17 10:42:58 -0700 | [diff] [blame] | 2152 | vdev = peer->vdev; |
Chaithanya Garrepalli | 974da26 | 2018-02-22 20:32:19 +0530 | [diff] [blame] | 2153 | } else { |
Manjunathappa Prakash | 9713759 | 2019-07-26 17:08:36 -0700 | [diff] [blame] | 2154 | nbuf->next = NULL; |
Jinwei Chen | 9d3f985 | 2019-07-12 19:01:18 +0800 | [diff] [blame] | 2155 | dp_rx_deliver_to_stack_no_peer(soc, nbuf); |
Chaithanya Garrepalli | 974da26 | 2018-02-22 20:32:19 +0530 | [diff] [blame] | 2156 | nbuf = next; |
| 2157 | continue; |
| 2158 | } |
| 2159 | |
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 2160 | if (qdf_unlikely(!vdev)) { |
Chaithanya Garrepalli | 974da26 | 2018-02-22 20:32:19 +0530 | [diff] [blame] | 2161 | qdf_nbuf_free(nbuf); |
| 2162 | nbuf = next; |
| 2163 | DP_STATS_INC(soc, rx.err.invalid_vdev, 1); |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 2164 | dp_peer_unref_del_find_by_id(peer); |
Chaithanya Garrepalli | 974da26 | 2018-02-22 20:32:19 +0530 | [diff] [blame] | 2165 | continue; |
| 2166 | } |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 2167 | |
Tallapragada Kalyan | cea9c93 | 2019-04-30 16:43:28 +0530 | [diff] [blame] | 2168 | rx_pdev = vdev->pdev; |
| 2169 | DP_RX_TID_SAVE(nbuf, tid); |
| 2170 | if (qdf_unlikely(rx_pdev->delay_stats_flag)) |
| 2171 | qdf_nbuf_set_timestamp(nbuf); |
| 2172 | |
| 2173 | ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); |
| 2174 | tid_stats = |
| 2175 | &rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; |
| 2176 | |
| 2177 | /* |
| 2178 | * Check if DMA completed -- msdu_done is the last bit |
| 2179 | * to be written |
| 2180 | */ |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 2181 | if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) && |
Tallapragada Kalyan | cea9c93 | 2019-04-30 16:43:28 +0530 | [diff] [blame] | 2182 | !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) { |
| 2183 | dp_err("MSDU DONE failure"); |
| 2184 | DP_STATS_INC(soc, rx.err.msdu_done_fail, 1); |
| 2185 | hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, |
| 2186 | QDF_TRACE_LEVEL_INFO); |
| 2187 | tid_stats->fail_cnt[MSDU_DONE_FAILURE]++; |
| 2188 | qdf_nbuf_free(nbuf); |
| 2189 | qdf_assert(0); |
| 2190 | nbuf = next; |
| 2191 | continue; |
| 2192 | } |
| 2193 | |
Tallapragada Kalyan | e33a563 | 2018-02-22 20:33:15 +0530 | [diff] [blame] | 2194 | DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2195 | /* |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 2196 | * First IF condition: |
| 2197 | * 802.11 Fragmented pkts are reinjected to REO |
| 2198 | * HW block as SG pkts and for these pkts we only |
| 2199 | * need to pull the RX TLVS header length. |
| 2200 | * Second IF condition: |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2201 | * The below condition happens when an MSDU is spread |
| 2202 | * across multiple buffers. This can happen in two cases |
| 2203 | * 1. The nbuf size is smaller than the received msdu.
| 2204 | * ex: we have set the nbuf size to 2048 during
| 2205 | * nbuf_alloc, but we received an msdu which is
| 2206 | * 2304 bytes in size, so this msdu is spread
| 2207 | * across 2 nbufs.
| 2208 | * |
| 2209 | * 2. AMSDUs when RAW mode is enabled. |
| 2210 | * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread |
| 2211 | * across 1st nbuf and 2nd nbuf and last MSDU is |
| 2212 | * spread across 2nd nbuf and 3rd nbuf. |
| 2213 | * |
| 2214 | * for these scenarios let us create a skb frag_list and |
| 2215 | * append these buffers till the last MSDU of the AMSDU |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 2216 | * Third condition: |
| 2217 | * This is the most likely case, we receive 802.3 pkts |
| 2218 | * decapsulated by HW, here we need to set the pkt length. |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2219 | */ |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 2220 | hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata); |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2221 | if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { |
| 2222 | bool is_mcbc, is_sa_vld, is_da_vld; |
| 2223 | |
Venkata Sharath Chandra Manchala | ee90938 | 2019-09-20 10:52:37 -0700 | [diff] [blame] | 2224 | is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, |
| 2225 | rx_tlv_hdr); |
Venkata Sharath Chandra Manchala | 59ebd5e | 2019-09-20 15:52:55 -0700 | [diff] [blame] | 2226 | is_sa_vld = |
| 2227 | hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, |
| 2228 | rx_tlv_hdr); |
Venkata Sharath Chandra Manchala | 7905538 | 2019-09-21 11:22:30 -0700 | [diff] [blame] | 2229 | is_da_vld = |
| 2230 | hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, |
| 2231 | rx_tlv_hdr); |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2232 | |
| 2233 | qdf_nbuf_set_da_mcbc(nbuf, is_mcbc); |
| 2234 | qdf_nbuf_set_da_valid(nbuf, is_da_vld); |
| 2235 | qdf_nbuf_set_sa_valid(nbuf, is_sa_vld); |
| 2236 | |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 2237 | qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 2238 | } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) { |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2239 | msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 2240 | nbuf = dp_rx_sg_create(nbuf); |
Chaithanya Garrepalli | 72dc913 | 2018-02-21 18:37:34 +0530 | [diff] [blame] | 2241 | next = nbuf->next; |
Jinwei Chen | 0b92469 | 2020-01-14 13:52:06 +0800 | [diff] [blame] | 2242 | |
| 2243 | if (qdf_nbuf_is_raw_frame(nbuf)) { |
| 2244 | DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); |
| 2245 | DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len); |
| 2246 | } else { |
| 2247 | qdf_nbuf_free(nbuf); |
| 2248 | DP_STATS_INC(soc, rx.err.scatter_msdu, 1); |
| 2249 | dp_info_rl("scatter msdu len %d, dropped", |
| 2250 | msdu_len); |
| 2251 | nbuf = next; |
| 2252 | dp_peer_unref_del_find_by_id(peer); |
| 2253 | continue; |
| 2254 | } |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 2255 | } else { |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 2256 | |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2257 | msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 2258 | pkt_len = msdu_len + |
| 2259 | msdu_metadata.l3_hdr_pad + |
| 2260 | RX_PKT_TLVS_LEN; |
Chaithanya Garrepalli | a173a18 | 2018-05-18 21:33:10 +0530 | [diff] [blame] | 2261 | |
| 2262 | qdf_nbuf_set_pktlen(nbuf, pkt_len); |
Manjunathappa Prakash | 9ee605c | 2020-02-10 19:35:18 -0800 | [diff] [blame] | 2263 | dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad); |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 2264 | } |
| 2265 | |
Varsha Mishra | 6e1760c | 2019-07-27 22:51:42 +0530 | [diff] [blame] | 2266 | /* |
| 2267 | * process frame for multipass phase processing
| 2268 | */ |
| 2269 | if (qdf_unlikely(vdev->multipass_en)) { |
Ankit Kumar | 53581e9 | 2020-01-02 10:15:16 +0530 | [diff] [blame] | 2270 | if (dp_rx_multipass_process(peer, nbuf, tid) == false) { |
| 2271 | DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1); |
| 2272 | qdf_nbuf_free(nbuf); |
| 2273 | nbuf = next; |
| 2274 | dp_peer_unref_del_find_by_id(peer); |
| 2275 | continue; |
| 2276 | } |
Varsha Mishra | 6e1760c | 2019-07-27 22:51:42 +0530 | [diff] [blame] | 2277 | } |
| 2278 | |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2279 | if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) { |
Tallapragada Kalyan | 2a5fc62 | 2017-12-08 21:07:43 +0530 | [diff] [blame] | 2280 | QDF_TRACE(QDF_MODULE_ID_DP, |
| 2281 | QDF_TRACE_LEVEL_ERROR, |
| 2282 | FL("Policy Check Drop pkt")); |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 2283 | tid_stats->fail_cnt[POLICY_CHECK_DROP]++; |
Tallapragada Kalyan | 2a5fc62 | 2017-12-08 21:07:43 +0530 | [diff] [blame] | 2284 | /* Drop & free packet */ |
| 2285 | qdf_nbuf_free(nbuf); |
| 2286 | /* Statistics */ |
| 2287 | nbuf = next; |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 2288 | dp_peer_unref_del_find_by_id(peer); |
Tallapragada Kalyan | 2a5fc62 | 2017-12-08 21:07:43 +0530 | [diff] [blame] | 2289 | continue; |
| 2290 | } |
| 2291 | |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2292 | if (qdf_unlikely(peer && (peer->nawds_enabled) && |
| 2293 | (qdf_nbuf_is_da_mcbc(nbuf)) && |
Venkata Sharath Chandra Manchala | 2a52d34 | 2019-09-21 11:52:54 -0700 | [diff] [blame] | 2294 | (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, |
| 2295 | rx_tlv_hdr) == |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2296 | false))) { |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 2297 | tid_stats->fail_cnt[NAWDS_MCAST_DROP]++; |
Ruchi, Agrawal | 2755048 | 2018-02-20 19:43:41 +0530 | [diff] [blame] | 2298 | DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); |
Ruchi, Agrawal | bd894b3 | 2017-11-03 17:24:56 +0530 | [diff] [blame] | 2299 | qdf_nbuf_free(nbuf); |
| 2300 | nbuf = next; |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 2301 | dp_peer_unref_del_find_by_id(peer); |
Ruchi, Agrawal | bd894b3 | 2017-11-03 17:24:56 +0530 | [diff] [blame] | 2302 | continue; |
| 2303 | } |
| 2304 | |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2305 | if (soc->process_rx_status) |
| 2306 | dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2307 | |
Karunakar Dasineni | 142f9ba | 2019-03-19 23:04:59 -0700 | [diff] [blame] | 2308 | /* Update the protocol tag in SKB based on CCE metadata */ |
Karunakar Dasineni | d8c7ad2 | 2019-04-18 18:15:02 -0700 | [diff] [blame] | 2309 | dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, |
| 2310 | reo_ring_num, false, true); |
Karunakar Dasineni | 142f9ba | 2019-03-19 23:04:59 -0700 | [diff] [blame] | 2311 | |
Sumeet Rao | c4fa4df | 2019-07-05 02:11:19 -0700 | [diff] [blame] | 2312 | /* Update the flow tag in SKB based on FSE metadata */ |
| 2313 | dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); |
| 2314 | |
Varsha Mishra | 9d42f12 | 2019-05-03 12:47:40 +0530 | [diff] [blame] | 2315 | dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, |
| 2316 | ring_id, tid_stats); |
Aditya Sathish | 6add3db | 2018-04-10 19:43:34 +0530 | [diff] [blame] | 2317 | |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2318 | if (qdf_unlikely(vdev->mesh_vdev)) { |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 2319 | if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2320 | == QDF_STATUS_SUCCESS) { |
| 2321 | QDF_TRACE(QDF_MODULE_ID_DP, |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 2322 | QDF_TRACE_LEVEL_INFO_MED, |
| 2323 | FL("mesh pkt filtered")); |
| 2324 | tid_stats->fail_cnt[MESH_FILTER_DROP]++; |
| 2325 | DP_STATS_INC(vdev->pdev, dropped.mesh_filter, |
| 2326 | 1); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2327 | |
| 2328 | qdf_nbuf_free(nbuf); |
| 2329 | nbuf = next; |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 2330 | dp_peer_unref_del_find_by_id(peer); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2331 | continue; |
| 2332 | } |
| 2333 | dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); |
| 2334 | } |
| 2335 | |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2336 | if (qdf_likely(vdev->rx_decap_type == |
Sravan Kumar Kairam | d7d1d67 | 2018-09-04 14:56:33 +0530 | [diff] [blame] | 2337 | htt_cmn_pkt_type_ethernet) && |
| 2338 | qdf_likely(!vdev->mesh_vdev)) { |
phadiman | 4213e9c | 2018-10-29 12:50:02 +0530 | [diff] [blame] | 2339 | /* WDS Destination Address Learning */ |
Nandha Kishore Easwaran | f9c44ce | 2019-01-18 15:31:18 +0530 | [diff] [blame] | 2340 | dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf); |
phadiman | 4213e9c | 2018-10-29 12:50:02 +0530 | [diff] [blame] | 2341 | |
Chaithanya Garrepalli | af34aae | 2019-02-18 20:44:27 +0530 | [diff] [blame] | 2342 | /* Due to a HW issue, the sa_idx and da_idx are
| 2343 | * sometimes invalid even though the sa_valid and
| 2344 | * da_valid bits are set
| 2345 | *
| 2346 | * in this case the value of sa_sw_peer_id is also
| 2347 | * seen as 0
| 2348 | *
| 2349 | * Drop the packet if sa_idx or da_idx is OOB or
| 2350 | * sa_sw_peer_id is 0
| 2351 | */ |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 2352 | if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf, |
| 2353 | msdu_metadata)) { |
Chaithanya Garrepalli | af34aae | 2019-02-18 20:44:27 +0530 | [diff] [blame] | 2354 | qdf_nbuf_free(nbuf); |
| 2355 | nbuf = next; |
| 2356 | DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); |
Jinwei Chen | 5bcc30f | 2019-05-20 21:17:56 +0800 | [diff] [blame] | 2357 | dp_peer_unref_del_find_by_id(peer); |
Chaithanya Garrepalli | af34aae | 2019-02-18 20:44:27 +0530 | [diff] [blame] | 2358 | continue; |
| 2359 | } |
phadiman | 4213e9c | 2018-10-29 12:50:02 +0530 | [diff] [blame] | 2360 | /* WDS Source Port Learning */ |
Ankit Kumar | f2526d4 | 2019-05-02 15:13:27 +0530 | [diff] [blame] | 2361 | if (qdf_likely(vdev->wds_enabled)) |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 2362 | dp_rx_wds_srcport_learn(soc, |
| 2363 | rx_tlv_hdr, |
| 2364 | peer, |
| 2365 | nbuf, |
| 2366 | msdu_metadata); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2367 | |
| 2368 | /* Intrabss-fwd */ |
Ruchi, Agrawal | bd894b3 | 2017-11-03 17:24:56 +0530 | [diff] [blame] | 2369 | if (dp_rx_check_ap_bridge(vdev)) |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2370 | if (dp_rx_intrabss_fwd(soc, |
| 2371 | peer, |
| 2372 | rx_tlv_hdr, |
syed touqeer pasha | 6997a37 | 2019-12-31 15:45:55 +0530 | [diff] [blame] | 2373 | nbuf, |
| 2374 | msdu_metadata)) { |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2375 | nbuf = next; |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 2376 | dp_peer_unref_del_find_by_id(peer); |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 2377 | tid_stats->intrabss_cnt++; |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2378 | continue; /* Get next desc */ |
| 2379 | } |
| 2380 | } |
| 2381 | |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 2382 | dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt); |
| 2383 | |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2384 | DP_RX_LIST_APPEND(deliver_list_head, |
Mohit Khanna | 7ac554b | 2018-05-24 11:58:13 -0700 | [diff] [blame] | 2385 | deliver_list_tail, |
| 2386 | nbuf); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2387 | DP_STATS_INC_PKT(peer, rx.to_stack, 1, |
Tallapragada Kalyan | 7147b3c | 2019-03-27 18:40:27 +0530 | [diff] [blame] | 2388 | QDF_NBUF_CB_RX_PKT_LEN(nbuf)); |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2389 | |
Varsha Mishra | 1828179 | 2019-03-06 17:57:23 +0530 | [diff] [blame] | 2390 | tid_stats->delivered_to_stack++; |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2391 | nbuf = next; |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 2392 | dp_peer_unref_del_find_by_id(peer); |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 2393 | } |
Dhanashri Atre | 0da3122 | 2017-03-23 12:30:58 -0700 | [diff] [blame] | 2394 | |
Chaithanya Garrepalli | 79b64ac | 2020-01-07 17:28:49 +0530 | [diff] [blame] | 2395 | if (qdf_likely(deliver_list_head)) { |
| 2396 | if (qdf_likely(peer)) |
Rakesh Pillai | c1aeb35 | 2020-01-14 13:06:15 +0530 | [diff] [blame] | 2397 | dp_rx_deliver_to_stack(soc, vdev, peer, |
| 2398 | deliver_list_head, |
Chaithanya Garrepalli | 79b64ac | 2020-01-07 17:28:49 +0530 | [diff] [blame] | 2399 | deliver_list_tail); |
| 2400 | else { |
| 2401 | nbuf = deliver_list_head; |
| 2402 | while (nbuf) { |
| 2403 | next = nbuf->next; |
| 2404 | nbuf->next = NULL; |
| 2405 | dp_rx_deliver_to_stack_no_peer(soc, nbuf); |
| 2406 | nbuf = next; |
| 2407 | } |
| 2408 | } |
| 2409 | } |
Tallapragada Kalyan | dbbb0c8 | 2017-08-24 20:58:04 +0530 | [diff] [blame] | 2410 | |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 2411 | if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) { |
Mohit Khanna | 8000265 | 2019-10-14 23:27:36 -0700 | [diff] [blame] | 2412 | if (quota) { |
| 2413 | num_pending = |
| 2414 | dp_rx_srng_get_num_pending(hal_soc, |
| 2415 | hal_ring_hdl, |
| 2416 | num_entries, |
| 2417 | &near_full); |
| 2418 | if (num_pending) { |
| 2419 | DP_STATS_INC(soc, rx.hp_oos2, 1); |
| 2420 | |
| 2421 | if (!hif_exec_should_yield(scn, intr_id)) |
| 2422 | goto more_data; |
| 2423 | |
| 2424 | if (qdf_unlikely(near_full)) { |
| 2425 | DP_STATS_INC(soc, rx.near_full, 1); |
| 2426 | goto more_data; |
| 2427 | } |
| 2428 | } |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 2429 | } |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 2430 | |
Manjunathappa Prakash | b896f0e | 2020-01-20 18:45:36 -0800 | [diff] [blame] | 2431 | if (vdev && vdev->osif_fisa_flush) |
Manjunathappa Prakash | 5d73e07 | 2020-01-08 16:50:25 -0800 | [diff] [blame] | 2432 | vdev->osif_fisa_flush(soc, reo_ring_num); |
| 2433 | |
Sravan Kumar Kairam | afd707d | 2019-08-11 18:43:30 +0530 | [diff] [blame] | 2434 | if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) { |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 2435 | vdev->osif_gro_flush(vdev->osif_vdev, |
| 2436 | reo_ring_num); |
| 2437 | } |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 2438 | } |
Mohit Khanna | 698987c | 2019-07-28 21:38:05 -0700 | [diff] [blame] | 2439 | |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 2440 | /* Update histogram statistics by looping through pdev's */ |
| 2441 | DP_RX_HIST_STATS_PER_PDEV(); |
| 2442 | |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2443 | return rx_bufs_used; /* Assume no scale factor for now */ |
| 2444 | } |
| 2445 | |
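/*
 * A minimal, self-contained sketch (not part of the driver) of the head/tail
 * singly-linked list pattern used by the reap and deliver loops above
 * (DP_RX_LIST_APPEND on nbuf_head/nbuf_tail and deliver_list_head/tail).
 * struct ex_nbuf and the ex_* helpers are illustrative assumptions only;
 * the real code operates on qdf_nbuf_t through the DP_RX_LIST_APPEND macro.
 */
#include <stddef.h>

struct ex_nbuf {
	struct ex_nbuf *next;
};

/* append one element at the tail in O(1), tracking both head and tail */
static void ex_list_append(struct ex_nbuf **head, struct ex_nbuf **tail,
			   struct ex_nbuf *elem)
{
	elem->next = NULL;
	if (!*head)
		*head = elem;		/* first element becomes the head */
	else
		(*tail)->next = elem;	/* link after the current tail */
	*tail = elem;
}

/* walk the list the way the BIG loop does: cache 'next' before processing */
static int ex_list_count(struct ex_nbuf *head)
{
	struct ex_nbuf *nbuf = head;
	struct ex_nbuf *next;
	int count = 0;

	while (nbuf) {
		next = nbuf->next;
		count++;
		nbuf = next;
	}

	return count;
}
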
Rakesh Pillai | 534a143 | 2019-10-24 06:44:11 +0530 | [diff] [blame] | 2446 | QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) |
| 2447 | { |
| 2448 | QDF_STATUS ret; |
| 2449 | |
| 2450 | if (vdev->osif_rx_flush) { |
| 2451 | ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); |
| 2452 | if (!QDF_IS_STATUS_SUCCESS(ret)) {
| 2453 | dp_err("Failed to flush rx pkts for vdev %d\n", |
| 2454 | vdev->vdev_id); |
| 2455 | return ret; |
| 2456 | } |
| 2457 | } |
| 2458 | |
| 2459 | return QDF_STATUS_SUCCESS; |
| 2460 | } |
| 2461 | |
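/*
 * A minimal sketch of a hypothetical caller (not part of the driver) showing
 * how the QDF_STATUS returned by dp_rx_vdev_detach() above is meant to be
 * consumed: a failed rx flush is propagated rather than ignored.
 * ex_vdev_rx_teardown() is an assumed name used only for illustration.
 */
static QDF_STATUS ex_vdev_rx_teardown(struct dp_vdev *vdev)
{
	QDF_STATUS status = dp_rx_vdev_detach(vdev);

	if (QDF_IS_STATUS_ERROR(status))
		return status;	/* abort teardown; rx pkts were not flushed */

	/* remaining vdev rx teardown steps would follow here */
	return QDF_STATUS_SUCCESS;
}
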
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2462 | /** |
Rakesh Pillai | 534a143 | 2019-10-24 06:44:11 +0530 | [diff] [blame] | 2463 | * dp_rx_pdev_detach() - detach dp rx |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 2464 | * @pdev: core txrx pdev context |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2465 | * |
| 2466 | * This function will detach DP RX from the main device context
| 2467 | * and free DP RX resources.
| 2468 | * |
| 2469 | * Return: void |
| 2470 | */ |
| 2471 | void |
| 2472 | dp_rx_pdev_detach(struct dp_pdev *pdev) |
| 2473 | { |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2474 | uint8_t mac_for_pdev = pdev->lmac_id; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2475 | struct dp_soc *soc = pdev->soc; |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 2476 | struct rx_desc_pool *rx_desc_pool; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2477 | |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2478 | rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 2479 | |
psimha | eae1b41 | 2017-08-25 16:10:13 -0700 | [diff] [blame] | 2480 | if (rx_desc_pool->pool_size != 0) { |
phadiman | 449a268 | 2019-02-20 14:00:00 +0530 | [diff] [blame] | 2481 | if (!dp_is_soc_reinit(soc)) |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2482 | dp_rx_desc_nbuf_and_pool_free(soc, mac_for_pdev, |
Varun Reddy Yeturu | a7c21dc | 2019-05-16 14:03:46 -0700 | [diff] [blame] | 2483 | rx_desc_pool); |
phadiman | 449a268 | 2019-02-20 14:00:00 +0530 | [diff] [blame] | 2484 | else |
Varun Reddy Yeturu | a7c21dc | 2019-05-16 14:03:46 -0700 | [diff] [blame] | 2485 | dp_rx_desc_nbuf_free(soc, rx_desc_pool); |
psimha | eae1b41 | 2017-08-25 16:10:13 -0700 | [diff] [blame] | 2486 | } |
Tallapragada Kalyan | 603c594 | 2016-12-07 21:30:44 +0530 | [diff] [blame] | 2487 | |
| 2488 | return; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2489 | } |
| 2490 | |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2491 | static QDF_STATUS |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2492 | dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf, |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2493 | struct dp_pdev *dp_pdev, |
| 2494 | struct rx_desc_pool *rx_desc_pool) |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2495 | { |
| 2496 | qdf_dma_addr_t paddr; |
| 2497 | QDF_STATUS ret = QDF_STATUS_E_FAILURE; |
| 2498 | |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2499 | *nbuf = qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, |
| 2500 | RX_BUFFER_RESERVATION, |
| 2501 | rx_desc_pool->buf_alignment, FALSE); |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2502 | if (!(*nbuf)) { |
| 2503 | dp_err("nbuf alloc failed"); |
| 2504 | DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); |
| 2505 | return ret; |
| 2506 | } |
| 2507 | |
| 2508 | ret = qdf_nbuf_map_single(dp_soc->osdev, *nbuf, |
| 2509 | QDF_DMA_FROM_DEVICE); |
| 2510 | if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { |
| 2511 | qdf_nbuf_free(*nbuf); |
| 2512 | dp_err("nbuf map failed"); |
| 2513 | DP_STATS_INC(dp_pdev, replenish.map_err, 1); |
| 2514 | return ret; |
| 2515 | } |
| 2516 | |
| 2517 | paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0); |
| 2518 | |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2519 | ret = check_x86_paddr(dp_soc, nbuf, &paddr, rx_desc_pool); |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2520 | if (ret == QDF_STATUS_E_FAILURE) { |
| 2521 | qdf_nbuf_unmap_single(dp_soc->osdev, *nbuf, |
| 2522 | QDF_DMA_FROM_DEVICE); |
| 2523 | qdf_nbuf_free(*nbuf); |
| 2524 | dp_err("nbuf check x86 failed"); |
| 2525 | DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); |
| 2526 | return ret; |
| 2527 | } |
| 2528 | |
| 2529 | return QDF_STATUS_SUCCESS; |
| 2530 | } |
| 2531 | |
Kiran Venkatappa | 115309a | 2019-07-16 22:15:35 +0530 | [diff] [blame] | 2532 | QDF_STATUS |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2533 | dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, |
| 2534 | struct dp_srng *dp_rxdma_srng, |
| 2535 | struct rx_desc_pool *rx_desc_pool, |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2536 | uint32_t num_req_buffers) |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2537 | { |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2538 | struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); |
Akshay Kosigi | a870c61 | 2019-07-08 23:10:30 +0530 | [diff] [blame] | 2539 | hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2540 | union dp_rx_desc_list_elem_t *next; |
| 2541 | void *rxdma_ring_entry; |
| 2542 | qdf_dma_addr_t paddr; |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2543 | qdf_nbuf_t *rx_nbuf_arr; |
| 2544 | uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; |
| 2545 | uint32_t buffer_index, nbuf_ptrs_per_page; |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2546 | qdf_nbuf_t nbuf; |
| 2547 | QDF_STATUS ret; |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2548 | int page_idx, total_pages; |
| 2549 | union dp_rx_desc_list_elem_t *desc_list = NULL; |
| 2550 | union dp_rx_desc_list_elem_t *tail = NULL; |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2551 | |
| 2552 | if (qdf_unlikely(!rxdma_srng)) { |
| 2553 | DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); |
| 2554 | return QDF_STATUS_E_FAILURE; |
| 2555 | } |
| 2556 | |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2557 | dp_debug("requested %u RX buffers for driver attach", num_req_buffers); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2558 | |
| 2559 | nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2560 | num_req_buffers, &desc_list, &tail); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2561 | if (!nr_descs) { |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2562 | dp_err("no free rx_descs in freelist"); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2563 | DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); |
| 2564 | return QDF_STATUS_E_NOMEM; |
| 2565 | } |
| 2566 | |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2567 | dp_debug("got %u RX descs for driver attach", nr_descs); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2568 | |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2569 | /* |
| 2570 | * Allocate the nbuf pointer array one page at a time:
| 2571 | * take as many pointers as fit in one page of memory and
| 2572 | * iterate over the total descriptors page by page,
| 2573 | * reusing the same page-sized pointer array on each
| 2574 | * iteration to index into the nbufs. (See the page-count
| 2575 | * sketch after this function.)
| 2576 | */ |
| 2577 | total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE; |
| 2578 | |
| 2579 | /* |
| 2580 | * Add an extra page to store the remainder if any |
| 2581 | */ |
| 2582 | if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE) |
| 2583 | total_pages++; |
| 2584 | rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2585 | if (!rx_nbuf_arr) { |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2586 | dp_err("failed to allocate nbuf array"); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2587 | DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2588 | QDF_BUG(0); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2589 | return QDF_STATUS_E_NOMEM; |
| 2590 | } |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2591 | nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2592 | |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2593 | for (page_idx = 0; page_idx < total_pages; page_idx++) { |
| 2594 | qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE); |
| 2595 | |
| 2596 | for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { |
| 2597 | /* |
| 2598 | * The last page of buffer pointers may not be required |
| 2599 | * completely based on the number of descriptors. Below |
| 2600 | * check will ensure we are allocating only the |
| 2601 | * required number of descriptors. |
| 2602 | */ |
| 2603 | if (nr_nbuf_total >= nr_descs) |
| 2604 | break; |
| 2605 | ret = dp_pdev_nbuf_alloc_and_map(dp_soc, |
| 2606 | &rx_nbuf_arr[nr_nbuf], |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2607 | dp_pdev, rx_desc_pool); |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2608 | if (QDF_IS_STATUS_ERROR(ret)) |
| 2609 | break; |
| 2610 | |
| 2611 | nr_nbuf_total++; |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2612 | } |
| 2613 | |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2614 | hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); |
| 2615 | |
| 2616 | for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { |
| 2617 | rxdma_ring_entry = |
| 2618 | hal_srng_src_get_next(dp_soc->hal_soc, |
| 2619 | rxdma_srng); |
| 2620 | qdf_assert_always(rxdma_ring_entry); |
| 2621 | |
| 2622 | next = desc_list->next; |
| 2623 | nbuf = rx_nbuf_arr[buffer_index]; |
| 2624 | paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); |
| 2625 | |
| 2626 | dp_rx_desc_prep(&desc_list->rx_desc, nbuf); |
| 2627 | desc_list->rx_desc.in_use = 1; |
| 2628 | |
| 2629 | hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, |
| 2630 | desc_list->rx_desc.cookie, |
| 2631 | rx_desc_pool->owner); |
| 2632 | |
| 2633 | dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true); |
| 2634 | |
| 2635 | desc_list = next; |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2636 | } |
| 2637 | |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2638 | hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2639 | } |
| 2640 | |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2641 | dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2642 | qdf_mem_free(rx_nbuf_arr); |
| 2643 | |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2644 | if (!nr_nbuf_total) { |
| 2645 | dp_err("No nbuf's allocated"); |
| 2646 | QDF_BUG(0); |
| 2647 | return QDF_STATUS_E_RESOURCES; |
| 2648 | } |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2649 | |
| 2650 | /* No need to count the number of bytes received during replenish. |
| 2651 | * Therefore set replenish.pkts.bytes as 0. |
| 2652 | */ |
| 2653 | DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2654 | |
jiad | 5679e39 | 2019-04-03 17:00:02 +0800 | [diff] [blame] | 2655 | return QDF_STATUS_SUCCESS; |
| 2656 | } |
| 2657 | |
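/*
 * A small, self-contained sketch (illustrative only, not driver code) of the
 * page-sized chunking arithmetic used by dp_pdev_rx_buffers_attach() above:
 * how many PAGE_SIZE-sized pointer arrays are needed for nr_descs nbuf
 * pointers, and how many pointers fit per page. EX_PAGE_SIZE and the ex_*
 * helpers are hypothetical names introduced for this example.
 */
#include <stddef.h>

#define EX_PAGE_SIZE 4096	/* assumed page size for the sketch */

static size_t ex_total_pages(size_t nr_descs, size_t ptr_size)
{
	size_t total_pages = (nr_descs * ptr_size) / EX_PAGE_SIZE;

	/* add an extra page to hold the remainder, if any */
	if ((nr_descs * ptr_size) % EX_PAGE_SIZE)
		total_pages++;

	return total_pages;
}

static size_t ex_ptrs_per_page(size_t ptr_size)
{
	return EX_PAGE_SIZE / ptr_size;
}

/*
 * e.g. with 8-byte pointers, 4096 / 8 = 512 pointers fit per page, so 1024
 * descriptors are covered in 2 page-sized iterations and 1025 in 3.
 */
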
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2658 | /** |
| 2659 | * dp_rx_pdev_attach() - attach DP RX
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 2660 | * @pdev: core txrx pdev context |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2661 | * |
| 2662 | * This function will attach a DP RX instance into the main |
| 2663 | * device (SOC) context. Will allocate dp rx resource and |
| 2664 | * initialize resources. |
| 2665 | * |
| 2666 | * Return: QDF_STATUS_SUCCESS: success |
| 2667 | * QDF_STATUS_E_RESOURCES: Error return |
| 2668 | */ |
| 2669 | QDF_STATUS |
| 2670 | dp_rx_pdev_attach(struct dp_pdev *pdev) |
| 2671 | { |
| 2672 | uint8_t pdev_id = pdev->pdev_id; |
| 2673 | struct dp_soc *soc = pdev->soc; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2674 | uint32_t rxdma_entries; |
Mainak Sen | 9550273 | 2019-07-25 00:48:59 +0530 | [diff] [blame] | 2675 | uint32_t rx_sw_desc_weight; |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 2676 | struct dp_srng *dp_rxdma_srng; |
| 2677 | struct rx_desc_pool *rx_desc_pool; |
Sumeet Rao | c4fa4df | 2019-07-05 02:11:19 -0700 | [diff] [blame] | 2678 | QDF_STATUS ret_val; |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2679 | int mac_for_pdev; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2680 | |
Bharat Kumar M | 9a5d537 | 2017-05-08 17:41:42 +0530 | [diff] [blame] | 2681 | if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { |
Aditya Sathish | ded018e | 2018-07-02 16:25:21 +0530 | [diff] [blame] | 2682 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, |
| 2683 | "nss-wifi<4> skip Rx refil %d", pdev_id); |
Bharat Kumar M | 9a5d537 | 2017-05-08 17:41:42 +0530 | [diff] [blame] | 2684 | return QDF_STATUS_SUCCESS; |
| 2685 | } |
| 2686 | |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2687 | pdev = soc->pdev_list[pdev_id]; |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2688 | mac_for_pdev = pdev->lmac_id; |
| 2689 | dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; |
| 2690 | |
Mohit Khanna | 7051499 | 2018-11-12 18:39:03 -0800 | [diff] [blame] | 2691 | rxdma_entries = dp_rxdma_srng->num_entries; |
| 2692 | |
chenguo | 9bece1a | 2017-12-19 18:49:41 +0800 | [diff] [blame] | 2693 | soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2694 | |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2695 | rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; |
Mainak Sen | 9550273 | 2019-07-25 00:48:59 +0530 | [diff] [blame] | 2696 | rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx); |
| 2697 | |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2698 | dp_rx_desc_pool_alloc(soc, mac_for_pdev, |
Mainak Sen | 9550273 | 2019-07-25 00:48:59 +0530 | [diff] [blame] | 2699 | rx_sw_desc_weight * rxdma_entries, |
Mohit Khanna | 7051499 | 2018-11-12 18:39:03 -0800 | [diff] [blame] | 2700 | rx_desc_pool); |
Venkata Sharath Chandra Manchala | 16fcceb | 2018-01-03 11:27:15 -0800 | [diff] [blame] | 2701 | |
| 2702 | rx_desc_pool->owner = DP_WBM2SW_RBM; |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2703 | rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; |
| 2704 | rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; |
| 2705 | |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2706 | /* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */ |
Mohit Khanna | 7051499 | 2018-11-12 18:39:03 -0800 | [diff] [blame] | 2707 | |
Sumeet Rao | c4fa4df | 2019-07-05 02:11:19 -0700 | [diff] [blame] | 2708 | ret_val = dp_rx_fst_attach(soc, pdev); |
| 2709 | if ((ret_val != QDF_STATUS_SUCCESS) && |
| 2710 | (ret_val != QDF_STATUS_E_NOSUPPORT)) { |
| 2711 | QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR, |
| 2712 | "RX Flow Search Table attach failed: pdev %d err %d", |
| 2713 | pdev_id, ret_val); |
| 2714 | return ret_val; |
| 2715 | } |
| 2716 | |
Amit Shukla | 1edfe5a | 2019-10-24 14:03:39 -0700 | [diff] [blame] | 2717 | return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng, |
Varun Reddy Yeturu | f31e44d | 2019-06-14 07:50:11 -0700 | [diff] [blame] | 2718 | rx_desc_pool, rxdma_entries - 1); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 2719 | } |
jinweic chen | c354632 | 2018-02-02 15:03:41 +0800 | [diff] [blame] | 2720 | |
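/*
 * A minimal sizing sketch (illustrative assumption, not driver code) for the
 * arithmetic in dp_rx_pdev_attach() above: the SW rx descriptor pool is
 * provisioned at rx_sw_desc_weight times the refill ring depth, while only
 * rxdma_entries - 1 buffers are posted at attach time. The ex_* helpers and
 * the stdint include are introduced only for this example.
 */
#include <stdint.h>

static inline uint32_t ex_rx_desc_pool_size(uint32_t rx_sw_desc_weight,
					    uint32_t rxdma_entries)
{
	/* e.g. weight 3 with a 4096-entry ring gives a 12288-descriptor pool */
	return rx_sw_desc_weight * rxdma_entries;
}

static inline uint32_t ex_rx_buffers_posted_at_attach(uint32_t rxdma_entries)
{
	/* e.g. a 4096-entry refill ring gets 4095 buffers at attach */
	return rxdma_entries - 1;
}
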
| 2721 | /* |
| 2722 | * dp_rx_nbuf_prepare() - prepare RX nbuf |
| 2723 | * @soc: core txrx main context |
| 2724 | * @pdev: core txrx pdev context |
| 2725 | * |
| 2726 | * This function allocates and maps an nbuf for RX DMA usage, retrying until it
| 2727 | * succeeds or reaches the max retry threshold (see the caller sketch after the function).
| 2728 | * |
| 2729 | * Return: qdf_nbuf_t pointer if succeeded, NULL if failed. |
| 2730 | */ |
| 2731 | qdf_nbuf_t |
| 2732 | dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev) |
| 2733 | { |
| 2734 | uint8_t *buf; |
| 2735 | int32_t nbuf_retry_count; |
| 2736 | QDF_STATUS ret; |
| 2737 | qdf_nbuf_t nbuf = NULL; |
| 2738 | |
| 2739 | for (nbuf_retry_count = 0; nbuf_retry_count < |
| 2740 | QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD; |
| 2741 | nbuf_retry_count++) { |
| 2742 | /* Allocate a new skb */ |
| 2743 | nbuf = qdf_nbuf_alloc(soc->osdev, |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2744 | RX_DATA_BUFFER_SIZE, |
jinweic chen | c354632 | 2018-02-02 15:03:41 +0800 | [diff] [blame] | 2745 | RX_BUFFER_RESERVATION, |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2746 | RX_DATA_BUFFER_ALIGNMENT, |
jinweic chen | c354632 | 2018-02-02 15:03:41 +0800 | [diff] [blame] | 2747 | FALSE); |
| 2748 | |
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 2749 | if (!nbuf) { |
jinweic chen | c354632 | 2018-02-02 15:03:41 +0800 | [diff] [blame] | 2750 | DP_STATS_INC(pdev, |
| 2751 | replenish.nbuf_alloc_fail, 1); |
| 2752 | continue; |
| 2753 | } |
| 2754 | |
| 2755 | buf = qdf_nbuf_data(nbuf); |
| 2756 | |
Shashikala Prabhu | 03a9f5b | 2020-01-28 19:11:30 +0530 | [diff] [blame] | 2757 | memset(buf, 0, RX_DATA_BUFFER_SIZE); |
jinweic chen | c354632 | 2018-02-02 15:03:41 +0800 | [diff] [blame] | 2758 | |
| 2759 | ret = qdf_nbuf_map_single(soc->osdev, nbuf, |
Ankit Kumar | 0ae4abc | 2019-05-02 15:08:42 +0530 | [diff] [blame] | 2760 | QDF_DMA_FROM_DEVICE); |
jinweic chen | c354632 | 2018-02-02 15:03:41 +0800 | [diff] [blame] | 2761 | |
| 2762 | /* nbuf map failed */ |
| 2763 | if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { |
| 2764 | qdf_nbuf_free(nbuf); |
| 2765 | DP_STATS_INC(pdev, replenish.map_err, 1); |
| 2766 | continue; |
| 2767 | } |
| 2768 | /* qdf_nbuf alloc and map succeeded */ |
| 2769 | break; |
| 2770 | } |
| 2771 | |
| 2772 | /* qdf_nbuf still alloc or map failed */ |
| 2773 | if (qdf_unlikely(nbuf_retry_count >= |
| 2774 | QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD)) |
| 2775 | return NULL; |
| 2776 | |
| 2777 | return nbuf; |
| 2778 | } |
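/*
 * A hypothetical caller sketch for dp_rx_nbuf_prepare() (illustrative only,
 * not part of the driver): the helper either returns a DMA-mapped nbuf or
 * NULL once QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD attempts are exhausted, so
 * callers must handle the NULL case and must unmap before freeing a buffer
 * they do not hand to hardware. ex_rx_prepare_one() is an assumed name.
 */
static QDF_STATUS ex_rx_prepare_one(struct dp_soc *soc, struct dp_pdev *pdev)
{
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_nbuf_prepare(soc, pdev);
	if (!nbuf)
		return QDF_STATUS_E_NOMEM;

	/* the real caller would program the mapped buffer into an RX ring;
	 * this sketch simply releases it again.
	 */
	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);
	qdf_nbuf_free(nbuf);

	return QDF_STATUS_SUCCESS;
}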