Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 1 | /* |
Ruben Columbus | 073874c | 2019-10-08 14:29:30 -0700 | [diff] [blame] | 2 | * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | #ifndef __DP_TX_H |
| 19 | #define __DP_TX_H |
| 20 | |
| 21 | #include <qdf_types.h> |
| 22 | #include <qdf_nbuf.h> |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 23 | #include "dp_types.h" |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 24 | |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 25 | |
| 26 | #define DP_TX_MAX_NUM_FRAGS 6 |
| 27 | |
| 28 | #define DP_TX_DESC_FLAG_ALLOCATED 0x1 |
| 29 | #define DP_TX_DESC_FLAG_TO_FW 0x2 |
| 30 | #define DP_TX_DESC_FLAG_FRAG 0x4 |
| 31 | #define DP_TX_DESC_FLAG_RAW 0x8 |
| 32 | #define DP_TX_DESC_FLAG_MESH 0x10 |
Kabilan Kannan | 60e3b30 | 2017-09-07 20:06:17 -0700 | [diff] [blame] | 33 | #define DP_TX_DESC_FLAG_QUEUED_TX 0x20 |
| 34 | #define DP_TX_DESC_FLAG_COMPLETED_TX 0x40 |
Ishank Jain | c838b13 | 2017-02-17 11:08:18 +0530 | [diff] [blame] | 35 | #define DP_TX_DESC_FLAG_ME 0x80 |
Kabilan Kannan | 60e3b30 | 2017-09-07 20:06:17 -0700 | [diff] [blame] | 36 | #define DP_TX_DESC_FLAG_TDLS_FRAME 0x100 |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 37 | |
Ravi Joshi | ab33d9b | 2017-02-11 21:43:28 -0800 | [diff] [blame] | 38 | #define DP_TX_FREE_SINGLE_BUF(soc, buf) \ |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 39 | do { \ |
Pamidipati, Vijay | 110bf96 | 2017-03-24 21:38:20 +0530 | [diff] [blame] | 40 | qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \ |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 41 | qdf_nbuf_free(buf); \ |
| 42 | } while (0) |
| 43 | |
| 44 | #define OCB_HEADER_VERSION 1 |
| 45 | |
Amir Patel | 5dc47f5 | 2019-05-30 14:06:06 +0530 | [diff] [blame] | 46 | #ifdef TX_PER_PDEV_DESC_POOL |
| 47 | #ifdef QCA_LL_TX_FLOW_CONTROL_V2 |
| 48 | #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id) |
| 49 | #else /* QCA_LL_TX_FLOW_CONTROL_V2 */ |
| 50 | #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id) |
| 51 | #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ |
| 52 | #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id) |
| 53 | #else |
| 54 | #ifdef TX_PER_VDEV_DESC_POOL |
| 55 | #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id) |
| 56 | #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id) |
| 57 | #endif /* TX_PER_VDEV_DESC_POOL */ |
| 58 | #endif /* TX_PER_PDEV_DESC_POOL */ |
| 59 | #define DP_TX_QUEUE_MASK 0x3 |
/**
 * struct dp_tx_frag_info_s - per-fragment buffer info for a Tx segment
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits (upper 16 bits; the full DMA
 *            address is split across @paddr_lo and @paddr_hi)
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
| 73 | |
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment
 *
 * One node in a singly linked list of segments; each segment holds at
 * most DP_TX_MAX_NUM_FRAGS fragments.
 */
struct dp_tx_seg_info_s {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
| 89 | |
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *            descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information
 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
 * converted into set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
| 108 | |
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
| 122 | |
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @u.tso_info: TSO information for TSO frame types
 * 	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @exception_fw: Duplicate frame to be sent to firmware
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[7];
	uint8_t exception_fw;
	uint16_t ppdu_cookie;
	uint8_t is_tx_sniffer;
};
| 155 | |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 156 | QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev); |
| 157 | QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev); |
Pamidipati, Vijay | c9a13a5 | 2017-04-06 17:45:49 +0530 | [diff] [blame] | 158 | void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev); |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 159 | |
| 160 | QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc); |
| 161 | QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc); |
| 162 | |
Anish Nataraj | e9d4c3b | 2018-11-24 22:24:56 +0530 | [diff] [blame] | 163 | /** |
 * dp_tso_soc_attach() - TSO Attach handler
| 165 | * @txrx_soc: Opaque Dp handle |
| 166 | * |
| 167 | * Reserve TSO descriptor buffers |
| 168 | * |
| 169 | * Return: QDF_STATUS_E_FAILURE on failure or |
| 170 | * QDF_STATUS_SUCCESS on success |
| 171 | */ |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 172 | QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc); |
Anish Nataraj | e9d4c3b | 2018-11-24 22:24:56 +0530 | [diff] [blame] | 173 | |
| 174 | /** |
 * dp_tso_soc_detach() - TSO Detach handler
| 176 | * @txrx_soc: Opaque Dp handle |
| 177 | * |
| 178 | * Deallocate TSO descriptor buffers |
| 179 | * |
| 180 | * Return: QDF_STATUS_E_FAILURE on failure or |
| 181 | * QDF_STATUS_SUCCESS on success |
| 182 | */ |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 183 | QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc); |
Anish Nataraj | e9d4c3b | 2018-11-24 22:24:56 +0530 | [diff] [blame] | 184 | |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 185 | QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev); |
| 186 | QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev); |
| 187 | |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 188 | qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf); |
Himanshu Batra | 21ade15 | 2019-09-03 16:08:54 +0530 | [diff] [blame] | 189 | |
Pavankumar Nandeshwar | 0ce3870 | 2019-09-30 18:43:03 +0530 | [diff] [blame] | 190 | qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 191 | qdf_nbuf_t nbuf, |
Prathyusha Guduri | be41d97 | 2018-01-19 14:17:14 +0530 | [diff] [blame] | 192 | struct cdp_tx_exception_metadata *tx_exc); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 193 | qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 194 | qdf_nbuf_t nbuf); |
Varsha Mishra | 06b91d3 | 2019-08-09 19:54:49 +0530 | [diff] [blame] | 195 | qdf_nbuf_t |
| 196 | dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf, |
| 197 | struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id, |
| 198 | struct cdp_tx_exception_metadata *tx_exc_metadata); |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 199 | |
Amir Patel | 5dc47f5 | 2019-05-30 14:06:06 +0530 | [diff] [blame] | 200 | #if QDF_LOCK_STATS |
| 201 | noinline qdf_nbuf_t |
| 202 | dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, |
| 203 | struct dp_tx_msdu_info_s *msdu_info); |
| 204 | #else |
| 205 | qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, |
| 206 | struct dp_tx_msdu_info_s *msdu_info); |
| 207 | #endif |
Jeff Johnson | 6889ddf | 2019-02-08 07:22:01 -0800 | [diff] [blame] | 208 | #ifdef FEATURE_WLAN_TDLS |
Rakesh Pillai | d295d1e | 2019-09-11 08:00:36 +0530 | [diff] [blame] | 209 | /** |
| 210 | * dp_tx_non_std() - Allow the control-path SW to send data frames |
| 211 | * @soc_hdl: Datapath soc handle |
| 212 | * @vdev_id: id of vdev |
| 213 | * @tx_spec: what non-standard handling to apply to the tx data frames |
| 214 | * @msdu_list: NULL-terminated list of tx MSDUs |
| 215 | * |
| 216 | * Return: NULL on success, |
| 217 | * nbuf when it fails to send |
| 218 | */ |
| 219 | qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 220 | enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list); |
Kabilan Kannan | 78acc11 | 2017-10-10 16:16:32 -0700 | [diff] [blame] | 221 | #endif |
Mainak Sen | 8bc9b42 | 2019-10-29 13:29:58 +0530 | [diff] [blame] | 222 | int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac); |
Kabilan Kannan | 60e3b30 | 2017-09-07 20:06:17 -0700 | [diff] [blame] | 223 | |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 224 | /** |
| 225 | * dp_tx_comp_handler() - Tx completion handler |
| 226 | * @int_ctx: pointer to DP interrupt context |
| 227 | * @soc: core txrx main context |
Varsha Mishra | 1f4cfb6 | 2019-05-31 00:59:15 +0530 | [diff] [blame] | 228 | * @hal_srng: Opaque HAL SRNG pointer |
Mohit Khanna | e5a6e94 | 2018-11-28 14:22:48 -0800 | [diff] [blame] | 229 | * @ring_id: completion ring id |
| 230 | * @quota: No. of packets/descriptors that can be serviced in one loop |
| 231 | * |
| 232 | * This function will collect hardware release ring element contents and |
| 233 | * handle descriptor contents. Based on contents, free packet or handle error |
| 234 | * conditions |
| 235 | * |
| 236 | * Return: Number of TX completions processed |
| 237 | */ |
| 238 | uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc, |
Akshay Kosigi | 0bca9fb | 2019-06-27 15:26:13 +0530 | [diff] [blame] | 239 | hal_ring_handle_t hal_srng, uint8_t ring_id, |
| 240 | uint32_t quota); |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 241 | |
Pamidipati, Vijay | aeff444 | 2018-01-19 22:58:32 +0530 | [diff] [blame] | 242 | QDF_STATUS |
Ishank Jain | c838b13 | 2017-02-17 11:08:18 +0530 | [diff] [blame] | 243 | dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf); |
| 244 | |
Amir Patel | cb99026 | 2019-05-28 15:12:48 +0530 | [diff] [blame] | 245 | #ifndef FEATURE_WDS |
/* MEC (multicast echo check) handling only applies when WDS is compiled
 * in; without FEATURE_WDS this no-op stub satisfies callers.
 */
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
Radha krishna Simha Jiguru | f70f991 | 2017-08-02 18:32:22 +0530 | [diff] [blame] | 250 | #endif |
| 251 | |
Amir Patel | 5dc47f5 | 2019-05-30 14:06:06 +0530 | [diff] [blame] | 252 | #ifndef ATH_SUPPORT_IQUE |
/* Multicast Enhancement teardown is a no-op when ATH_SUPPORT_IQUE is
 * not compiled in.
 */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
| 257 | #endif |
Varsha Mishra | 6e1760c | 2019-07-27 22:51:42 +0530 | [diff] [blame] | 258 | |
| 259 | #ifndef QCA_MULTIPASS_SUPPORT |
| 260 | static inline |
| 261 | bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev, |
| 262 | qdf_nbuf_t nbuf, |
| 263 | struct dp_tx_msdu_info_s *msdu_info) |
| 264 | { |
| 265 | return true; |
| 266 | } |
| 267 | |
/* Multipass support disabled at build time: nothing to tear down
 * per-vdev.
 */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
| 272 | |
| 273 | #else |
| 274 | bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev, |
| 275 | qdf_nbuf_t nbuf, |
| 276 | struct dp_tx_msdu_info_s *msdu_info); |
| 277 | |
| 278 | void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev); |
| 279 | #endif |
| 280 | |
Amir Patel | 5dc47f5 | 2019-05-30 14:06:06 +0530 | [diff] [blame] | 281 | /** |
| 282 | * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame |
| 283 | * @vdev: DP Virtual device handle |
| 284 | * @nbuf: Buffer pointer |
| 285 | * @queue: queue ids container for nbuf |
| 286 | * |
| 287 | * TX packet queue has 2 instances, software descriptors id and dma ring id |
| 288 | * Based on tx feature and hardware configuration queue id combination could be |
| 289 | * different. |
| 290 | * For example - |
| 291 | * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id |
| 292 | * With no XPS,lock based resource protection, Descriptor pool ids are different |
| 293 | * for each vdev, dma ring id will be same as single pdev id |
| 294 | * |
| 295 | * Return: None |
| 296 | */ |
| 297 | #ifdef QCA_OL_TX_MULTIQ_SUPPORT |
| 298 | static inline void dp_tx_get_queue(struct dp_vdev *vdev, |
| 299 | qdf_nbuf_t nbuf, struct dp_tx_queue *queue) |
| 300 | { |
| 301 | uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & |
| 302 | DP_TX_QUEUE_MASK; |
Radha krishna Simha Jiguru | 47876f6 | 2017-11-30 21:08:40 +0530 | [diff] [blame] | 303 | |
Amir Patel | 5dc47f5 | 2019-05-30 14:06:06 +0530 | [diff] [blame] | 304 | queue->desc_pool_id = queue_offset; |
| 305 | queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset]; |
| 306 | |
| 307 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
| 308 | "%s, pool_id:%d ring_id: %d", |
| 309 | __func__, queue->desc_pool_id, queue->ring_id); |
| 310 | } |
| 311 | #else /* QCA_OL_TX_MULTIQ_SUPPORT */ |
| 312 | static inline void dp_tx_get_queue(struct dp_vdev *vdev, |
| 313 | qdf_nbuf_t nbuf, struct dp_tx_queue *queue) |
| 314 | { |
| 315 | /* get flow id */ |
| 316 | queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev); |
| 317 | queue->ring_id = DP_TX_GET_RING_ID(vdev); |
| 318 | |
| 319 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
| 320 | "%s, pool_id:%d ring_id: %d", |
| 321 | __func__, queue->desc_pool_id, queue->ring_id); |
| 322 | } |
| 323 | #endif |
Radha krishna Simha Jiguru | 47876f6 | 2017-11-30 21:08:40 +0530 | [diff] [blame] | 324 | #ifdef FEATURE_PERPKT_INFO |
| 325 | QDF_STATUS |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 326 | dp_get_completion_indication_for_stack(struct dp_soc *soc, |
| 327 | struct dp_pdev *pdev, |
Amir Patel | 12550f6 | 2018-09-28 19:05:28 +0530 | [diff] [blame] | 328 | struct dp_peer *peer, |
| 329 | struct hal_tx_completion_status *ts, |
Ankit Kumar | 8dc0e2a | 2019-02-28 18:17:15 +0530 | [diff] [blame] | 330 | qdf_nbuf_t netbuf, |
| 331 | uint64_t time_latency); |
Ruchi, Agrawal | c0f9c97 | 2018-02-02 11:24:05 +0530 | [diff] [blame] | 332 | |
| 333 | void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, |
Amir Patel | 12550f6 | 2018-09-28 19:05:28 +0530 | [diff] [blame] | 334 | uint16_t peer_id, uint32_t ppdu_id, |
| 335 | qdf_nbuf_t netbuf); |
Radha krishna Simha Jiguru | 47876f6 | 2017-11-30 21:08:40 +0530 | [diff] [blame] | 336 | #endif |
| 337 | |
Akshay Kosigi | 67c8bb9 | 2019-07-04 14:28:19 +0530 | [diff] [blame] | 338 | void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl); |
Ruchi, Agrawal | 234753c | 2018-06-28 14:53:37 +0530 | [diff] [blame] | 339 | |
Neil Zhao | 4887636 | 2018-03-22 11:23:02 -0700 | [diff] [blame] | 340 | #ifdef ATH_TX_PRI_OVERRIDE |
chenguo | 6824d8d | 2018-05-10 15:19:51 +0800 | [diff] [blame] | 341 | #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \ |
Neil Zhao | 4887636 | 2018-03-22 11:23:02 -0700 | [diff] [blame] | 342 | ((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf)) |
| 343 | #else |
| 344 | #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) |
| 345 | #endif |
| 346 | |
Pavankumar Nandeshwar | b86ddaf | 2019-10-07 12:55:16 +0530 | [diff] [blame] | 347 | void |
| 348 | dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc, |
| 349 | uint32_t buf_type); |
| 350 | |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 351 | /* TODO TX_FEATURE_NOT_YET */ |
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	/* Tx completion exception processing not implemented yet */
}
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 356 | /* TODO TX_FEATURE_NOT_YET */ |
Ruben Columbus | 073874c | 2019-10-08 14:29:30 -0700 | [diff] [blame] | 357 | |
| 358 | #ifndef WLAN_TX_PKT_CAPTURE_ENH |
/* Tx packet capture enhancement not compiled in: toggling per-peer
 * capture is a no-op.
 */
static inline
void dp_peer_set_tx_capture_enabled(struct dp_peer *peer_handle, bool value)
{
}
| 363 | #endif |
Pamidipati, Vijay | 576bd15 | 2016-09-27 20:58:18 +0530 | [diff] [blame] | 364 | #endif |