blob: 42148b5b9ff7944b6b8fc887fc00356ac9f28e0e [file] [log] [blame]
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301/*
Ruben Columbus073874c2019-10-08 14:29:30 -07002 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18#ifndef __DP_TX_H
19#define __DP_TX_H
20
21#include <qdf_types.h>
22#include <qdf_nbuf.h>
Leo Chang5ea93a42016-11-03 12:39:49 -070023#include "dp_types.h"
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053024
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053025
26#define DP_TX_MAX_NUM_FRAGS 6
27
28#define DP_TX_DESC_FLAG_ALLOCATED 0x1
29#define DP_TX_DESC_FLAG_TO_FW 0x2
30#define DP_TX_DESC_FLAG_FRAG 0x4
31#define DP_TX_DESC_FLAG_RAW 0x8
32#define DP_TX_DESC_FLAG_MESH 0x10
Kabilan Kannan60e3b302017-09-07 20:06:17 -070033#define DP_TX_DESC_FLAG_QUEUED_TX 0x20
34#define DP_TX_DESC_FLAG_COMPLETED_TX 0x40
Ishank Jainc838b132017-02-17 11:08:18 +053035#define DP_TX_DESC_FLAG_ME 0x80
Kabilan Kannan60e3b302017-09-07 20:06:17 -070036#define DP_TX_DESC_FLAG_TDLS_FRAME 0x100
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053037
Ravi Joshiab33d9b2017-02-11 21:43:28 -080038#define DP_TX_FREE_SINGLE_BUF(soc, buf) \
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053039do { \
Pamidipati, Vijay110bf962017-03-24 21:38:20 +053040 qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053041 qdf_nbuf_free(buf); \
42} while (0)
43
44#define OCB_HEADER_VERSION 1
45
Amir Patel5dc47f52019-05-30 14:06:06 +053046#ifdef TX_PER_PDEV_DESC_POOL
47#ifdef QCA_LL_TX_FLOW_CONTROL_V2
48#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
49#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
50#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
51#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
52 #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
53#else
54 #ifdef TX_PER_VDEV_DESC_POOL
55 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
56 #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
57 #endif /* TX_PER_VDEV_DESC_POOL */
58#endif /* TX_PER_PDEV_DESC_POOL */
59#define DP_TX_QUEUE_MASK 0x3
/**
 * struct dp_tx_frag_info_s - Per-fragment Tx buffer descriptor
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
73
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment (at most DP_TX_MAX_NUM_FRAGS)
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
89
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *            descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
 * converted into set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
108
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
122
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @u.tso_info: TSO information for TSO frame types
 *              (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information (7 words)
 * @exception_fw: Duplicate frame to be sent to firmware
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[7];
	uint8_t exception_fw;
	uint16_t ppdu_cookie;
	uint8_t is_tx_sniffer;
};
155
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530156QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
157QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +0530158void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530159
160QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc);
161QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc);
162
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530163/**
 * dp_tso_soc_attach() - TSO Attach handler
165 * @txrx_soc: Opaque Dp handle
166 *
167 * Reserve TSO descriptor buffers
168 *
169 * Return: QDF_STATUS_E_FAILURE on failure or
170 * QDF_STATUS_SUCCESS on success
171 */
Pavankumar Nandeshwara2347162019-12-18 23:20:31 +0530172QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530173
174/**
 * dp_tso_soc_detach() - TSO Detach handler
176 * @txrx_soc: Opaque Dp handle
177 *
178 * Deallocate TSO descriptor buffers
179 *
180 * Return: QDF_STATUS_E_FAILURE on failure or
181 * QDF_STATUS_SUCCESS on success
182 */
Pavankumar Nandeshwara2347162019-12-18 23:20:31 +0530183QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530184
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530185QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
186QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);
187
Pavankumar Nandeshwara2347162019-12-18 23:20:31 +0530188qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
Himanshu Batra21ade152019-09-03 16:08:54 +0530189
Pavankumar Nandeshwar0ce38702019-09-30 18:43:03 +0530190qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
191 qdf_nbuf_t nbuf,
Prathyusha Guduribe41d972018-01-19 14:17:14 +0530192 struct cdp_tx_exception_metadata *tx_exc);
Pavankumar Nandeshwara2347162019-12-18 23:20:31 +0530193qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
194 qdf_nbuf_t nbuf);
Varsha Mishra06b91d32019-08-09 19:54:49 +0530195qdf_nbuf_t
196dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
197 struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
198 struct cdp_tx_exception_metadata *tx_exc_metadata);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530199
Amir Patel5dc47f52019-05-30 14:06:06 +0530200#if QDF_LOCK_STATS
201noinline qdf_nbuf_t
202dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
203 struct dp_tx_msdu_info_s *msdu_info);
204#else
205qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
206 struct dp_tx_msdu_info_s *msdu_info);
207#endif
Jeff Johnson6889ddf2019-02-08 07:22:01 -0800208#ifdef FEATURE_WLAN_TDLS
Rakesh Pillaid295d1e2019-09-11 08:00:36 +0530209/**
210 * dp_tx_non_std() - Allow the control-path SW to send data frames
211 * @soc_hdl: Datapath soc handle
212 * @vdev_id: id of vdev
213 * @tx_spec: what non-standard handling to apply to the tx data frames
214 * @msdu_list: NULL-terminated list of tx MSDUs
215 *
216 * Return: NULL on success,
217 * nbuf when it fails to send
218 */
219qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
220 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
Kabilan Kannan78acc112017-10-10 16:16:32 -0700221#endif
Mainak Sen8bc9b422019-10-29 13:29:58 +0530222int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
Kabilan Kannan60e3b302017-09-07 20:06:17 -0700223
Mohit Khannae5a6e942018-11-28 14:22:48 -0800224/**
225 * dp_tx_comp_handler() - Tx completion handler
226 * @int_ctx: pointer to DP interrupt context
227 * @soc: core txrx main context
Varsha Mishra1f4cfb62019-05-31 00:59:15 +0530228 * @hal_srng: Opaque HAL SRNG pointer
Mohit Khannae5a6e942018-11-28 14:22:48 -0800229 * @ring_id: completion ring id
230 * @quota: No. of packets/descriptors that can be serviced in one loop
231 *
232 * This function will collect hardware release ring element contents and
233 * handle descriptor contents. Based on contents, free packet or handle error
234 * conditions
235 *
236 * Return: Number of TX completions processed
237 */
238uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530239 hal_ring_handle_t hal_srng, uint8_t ring_id,
240 uint32_t quota);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530241
Pamidipati, Vijayaeff4442018-01-19 22:58:32 +0530242QDF_STATUS
Ishank Jainc838b132017-02-17 11:08:18 +0530243dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
244
Amir Patelcb990262019-05-28 15:12:48 +0530245#ifndef FEATURE_WDS
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +0530246static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
247{
248 return;
249}
Radha krishna Simha Jiguruf70f9912017-08-02 18:32:22 +0530250#endif
251
Amir Patel5dc47f52019-05-30 14:06:06 +0530252#ifndef ATH_SUPPORT_IQUE
/**
 * dp_tx_me_exit() - Tx ME teardown stub when ATH_SUPPORT_IQUE is disabled
 * @pdev: DP pdev handle
 *
 * No-op in this configuration; a real implementation is provided only when
 * ATH_SUPPORT_IQUE is compiled in.
 */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
257#endif
Varsha Mishra6e1760c2019-07-27 22:51:42 +0530258
259#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_tx_multipass_process() - multipass processing stub
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @nbuf: frame being transmitted
 * @msdu_info: MSDU descriptor for this frame
 *
 * No-op when QCA_MULTIPASS_SUPPORT is not compiled in; always lets the
 * frame continue through the regular Tx path.
 *
 * Return: true (frame accepted for normal transmission)
 */
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}
267
/**
 * dp_tx_vdev_multipass_deinit() - multipass vdev teardown stub
 * @vdev: DP vdev handle
 *
 * No-op when QCA_MULTIPASS_SUPPORT is not compiled in.
 */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
272
273#else
274bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
275 qdf_nbuf_t nbuf,
276 struct dp_tx_msdu_info_s *msdu_info);
277
278void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
279#endif
280
Amir Patel5dc47f52019-05-30 14:06:06 +0530281/**
282 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
283 * @vdev: DP Virtual device handle
284 * @nbuf: Buffer pointer
285 * @queue: queue ids container for nbuf
286 *
287 * TX packet queue has 2 instances, software descriptors id and dma ring id
288 * Based on tx feature and hardware configuration queue id combination could be
289 * different.
290 * For example -
291 * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
292 * With no XPS,lock based resource protection, Descriptor pool ids are different
293 * for each vdev, dma ring id will be same as single pdev id
294 *
295 * Return: None
296 */
297#ifdef QCA_OL_TX_MULTIQ_SUPPORT
298static inline void dp_tx_get_queue(struct dp_vdev *vdev,
299 qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
300{
301 uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) &
302 DP_TX_QUEUE_MASK;
Radha krishna Simha Jiguru47876f62017-11-30 21:08:40 +0530303
Amir Patel5dc47f52019-05-30 14:06:06 +0530304 queue->desc_pool_id = queue_offset;
305 queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
306
307 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
308 "%s, pool_id:%d ring_id: %d",
309 __func__, queue->desc_pool_id, queue->ring_id);
310}
311#else /* QCA_OL_TX_MULTIQ_SUPPORT */
312static inline void dp_tx_get_queue(struct dp_vdev *vdev,
313 qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
314{
315 /* get flow id */
316 queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
317 queue->ring_id = DP_TX_GET_RING_ID(vdev);
318
319 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
320 "%s, pool_id:%d ring_id: %d",
321 __func__, queue->desc_pool_id, queue->ring_id);
322}
323#endif
Radha krishna Simha Jiguru47876f62017-11-30 21:08:40 +0530324#ifdef FEATURE_PERPKT_INFO
325QDF_STATUS
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +0530326dp_get_completion_indication_for_stack(struct dp_soc *soc,
327 struct dp_pdev *pdev,
Amir Patel12550f62018-09-28 19:05:28 +0530328 struct dp_peer *peer,
329 struct hal_tx_completion_status *ts,
Ankit Kumar8dc0e2a2019-02-28 18:17:15 +0530330 qdf_nbuf_t netbuf,
331 uint64_t time_latency);
Ruchi, Agrawalc0f9c972018-02-02 11:24:05 +0530332
333void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
Amir Patel12550f62018-09-28 19:05:28 +0530334 uint16_t peer_id, uint32_t ppdu_id,
335 qdf_nbuf_t netbuf);
Radha krishna Simha Jiguru47876f62017-11-30 21:08:40 +0530336#endif
337
Akshay Kosigi67c8bb92019-07-04 14:28:19 +0530338void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl);
Ruchi, Agrawal234753c2018-06-28 14:53:37 +0530339
Neil Zhao48876362018-03-22 11:23:02 -0700340#ifdef ATH_TX_PRI_OVERRIDE
chenguo6824d8d2018-05-10 15:19:51 +0800341#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
Neil Zhao48876362018-03-22 11:23:02 -0700342 ((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
343#else
344#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
345#endif
346
Pavankumar Nandeshwarb86ddaf2019-10-07 12:55:16 +0530347void
348dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
349 uint32_t buf_type);
350
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530351/* TODO TX_FEATURE_NOT_YET */
/**
 * dp_tx_comp_process_exception() - exception-frame completion placeholder
 * @tx_desc: Tx software descriptor being completed
 *
 * Not implemented yet (TX_FEATURE_NOT_YET); intentionally empty.
 */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
}
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530356/* TODO TX_FEATURE_NOT_YET */
Ruben Columbus073874c2019-10-08 14:29:30 -0700357
358#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_set_tx_capture_enabled() - per-peer Tx capture toggle stub
 * @peer_handle: DP peer handle
 * @value: requested enable/disable state (ignored here)
 *
 * No-op when WLAN_TX_PKT_CAPTURE_ENH is not compiled in.
 */
static inline
void dp_peer_set_tx_capture_enabled(struct dp_peer *peer_handle, bool value)
{
}
363#endif
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530364#endif