blob: c7fbe794ce54add3cfacfecbb5160a8fa3a948e8 [file] [log] [blame]
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301/*
Kiran Venkatappa7ec51692017-12-26 13:41:23 +05302 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include "htt.h"
20#include "dp_tx.h"
21#include "dp_tx_desc.h"
Ishank Jain1e7401c2017-02-17 15:38:39 +053022#include "dp_peer.h"
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053023#include "dp_types.h"
24#include "hal_tx.h"
25#include "qdf_mem.h"
26#include "qdf_nbuf.h"
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +053027#include "qdf_net_types.h"
Ravi Joshiaf9ace82017-02-17 12:41:48 -080028#include <wlan_cfg.h>
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +053029#ifdef MESH_MODE_SUPPORT
30#include "if_meta_hdr.h"
31#endif
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053032
Prathyusha Guduri02ed9482018-04-17 19:06:30 +053033#define DP_TX_QUEUE_MASK 0x3
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053034
35/* TODO Add support in TSO */
36#define DP_DESC_NUM_FRAG(x) 0
Ankit Gupta20e59582016-12-06 14:24:00 -080037
38/* disable TQM_BYPASS */
Dhanashri Atre10a93232016-11-11 18:47:05 -080039#define TQM_BYPASS_WAR 0
Ankit Gupta20e59582016-12-06 14:24:00 -080040
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +053041/* invalid peer id for reinject*/
42#define DP_INVALID_PEER 0XFFFE
43
/*
 * Mapping from cdp_sec_type (index) to the corresponding HAL transmit
 * encrypt type (value), used when programming the TCL descriptor for
 * exception-path / raw frames.
 */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
	HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
	HAL_TX_ENCRYPT_TYPE_WEP_128,
	HAL_TX_ENCRYPT_TYPE_WEP_104,
	HAL_TX_ENCRYPT_TYPE_WEP_40,
	HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
	HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
	HAL_TX_ENCRYPT_TYPE_WAPI,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
	HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
59
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053060/**
61 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
62 * @vdev: DP Virtual device handle
63 * @nbuf: Buffer pointer
64 * @queue: queue ids container for nbuf
65 *
66 * TX packet queue has 2 instances, software descriptors id and dma ring id
67 * Based on tx feature and hardware configuration queue id combination could be
68 * different.
69 * For example -
70 * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
71 * With no XPS,lock based resource protection, Descriptor pool ids are different
72 * for each vdev, dma ring id will be same as single pdev id
73 *
74 * Return: None
75 */
Prathyusha Guduri02ed9482018-04-17 19:06:30 +053076#ifdef QCA_OL_TX_MULTIQ_SUPPORT
77static inline void dp_tx_get_queue(struct dp_vdev *vdev,
78 qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
79{
80 uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
81 queue->desc_pool_id = queue_offset;
82 queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
83
84 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
85 "%s, pool_id:%d ring_id: %d",
86 __func__, queue->desc_pool_id, queue->ring_id);
87
88 return;
89}
90#else /* QCA_OL_TX_MULTIQ_SUPPORT */
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053091static inline void dp_tx_get_queue(struct dp_vdev *vdev,
92 qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
93{
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070094 /* get flow id */
Pamidipati, Vijay576bd152016-09-27 20:58:18 +053095 queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
96 queue->ring_id = DP_TX_GET_RING_ID(vdev);
97
Houston Hoffman41b912c2017-08-30 14:27:51 -070098 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Yun Parkb9a7b5a2017-09-06 14:34:58 -070099 "%s, pool_id:%d ring_id: %d",
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530100 __func__, queue->desc_pool_id, queue->ring_id);
101
102 return;
103}
Prathyusha Guduri02ed9482018-04-17 19:06:30 +0530104#endif
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530105
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_release() - Release the tso segment
 * after unmapping all the fragments
 *
 * @soc - core txrx context
 * @tx_desc - Tx software descriptor that owns the TSO segment
 *
 * Decrements the shared per-MSDU segment count; on the last segment
 * the common TSO info is unmapped as well and the num-seg element is
 * returned to its pool.
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO desc is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
		/* Fixed: was QDF_MODULE_ID_TXRX; use QDF_MODULE_ID_DP for
		 * consistency with every other trace in this file.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO common info is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;

		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
			/* Other segments of this jumbo MSDU still pending:
			 * unmap only this segment's fragments.
			 */
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, false);
		} else {
			/* Last segment: unmap including common info and
			 * free the shared num-seg element.
			 */
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, true);
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
		}
		dp_tx_tso_desc_free(soc,
				tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
/* FEATURE_TSO disabled: nothing to release */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	return;
}
#endif
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc : Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor (TSO segments,
 * MSDU extension descriptor, multicast-echo buffer), adjust the pdev
 * outstanding/exception counters and free the Tx descriptor itself.
 *
 * Return: none
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	/* TSO frames own segment / num-seg elements that must be
	 * unmapped and returned to their pools first */
	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	/* Scattered frames carry an MSDU extension descriptor */
	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	/* Mcast-enhancement frames hold a cloned header buffer */
	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	qdf_atomic_dec(&pdev->num_tx_outstanding);

	/* Exception (to-FW) frames were counted at enqueue time */
	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&pdev->num_tx_exception);

	/* Derive the completion status for the debug trace: TQM releases
	 * report a reason code, anything else is attributed to FW */
	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
				hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"Tx Completion Release desc %d status %d outstanding %d",
		tx_desc->id, comp_status,
		qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}
206
207/**
208 * dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames
209 * @vdev: DP vdev Handle
210 * @nbuf: skb
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530211 *
212 * Prepares and fills HTT metadata in the frame pre-header for special frames
213 * that should be transmitted using varying transmit parameters.
214 * There are 2 VDEV modes that currently needs this special metadata -
215 * 1) Mesh Mode
216 * 2) DSRC Mode
217 *
218 * Return: HTT metadata size
219 *
220 */
Jeff Johnson755f2612017-01-05 16:28:13 -0800221static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530222 uint32_t *meta_data)
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530223{
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +0530224 struct htt_tx_msdu_desc_ext2_t *desc_ext =
225 (struct htt_tx_msdu_desc_ext2_t *) meta_data;
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530226
227 uint8_t htt_desc_size;
228
229 /* Size rounded of multiple of 8 bytes */
230 uint8_t htt_desc_size_aligned;
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +0530231
232 uint8_t *hdr = NULL;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530233
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530234 /*
235 * Metadata - HTT MSDU Extension header
236 */
237 htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530238 htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530239
240 if (vdev->mesh_vdev) {
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530241
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530242 /* Fill and add HTT metaheader */
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530243 hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
Venkateswara Swamy Bandaru6d840bc2017-07-10 15:35:28 +0530244 if (hdr == NULL) {
245 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
246 "Error in filling HTT metadata\n");
247
248 return 0;
249 }
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +0530250 qdf_mem_copy(hdr, desc_ext, htt_desc_size);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530251
252 } else if (vdev->opmode == wlan_op_mode_ocb) {
253 /* Todo - Add support for DSRC */
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530254 }
255
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530256 return htt_desc_size_aligned;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530257}
258
259/**
Ishank Jain5122f8f2017-03-15 22:22:47 +0530260 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
261 * @tso_seg: TSO segment to process
262 * @ext_desc: Pointer to MSDU extension descriptor
263 *
264 * Return: void
265 */
266#if defined(FEATURE_TSO)
267static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
268 void *ext_desc)
269{
270 uint8_t num_frag;
Ishank Jain5122f8f2017-03-15 22:22:47 +0530271 uint32_t tso_flags;
272
273 /*
274 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
275 * tcp_flag_mask
276 *
277 * Checksum enable flags are set in TCL descriptor and not in Extension
278 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
279 */
280 tso_flags = *(uint32_t *) &tso_seg->tso_flags;
281
282 hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
283
284 hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
285 tso_seg->tso_flags.ip_len);
286
287 hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
288 hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
289
290
291 for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
292 uint32_t lo = 0;
293 uint32_t hi = 0;
294
295 qdf_dmaaddr_to_32s(
296 tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
297 hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
298 tso_seg->tso_frags[num_frag].length);
299 }
300
301 return;
302}
303#else
304static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
305 void *ext_desc)
306{
307 return;
308}
309#endif
310
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700311#if defined(FEATURE_TSO)
312/**
313 * dp_tx_free_tso_seg() - Loop through the tso segments
314 * allocated and free them
315 *
316 * @soc: soc handle
317 * @free_seg: list of tso segments
318 * @msdu_info: msdu descriptor
319 *
320 * Return - void
321 */
322static void dp_tx_free_tso_seg(struct dp_soc *soc,
323 struct qdf_tso_seg_elem_t *free_seg,
324 struct dp_tx_msdu_info_s *msdu_info)
325{
326 struct qdf_tso_seg_elem_t *next_seg;
327
328 while (free_seg) {
329 next_seg = free_seg->next;
330 dp_tx_tso_desc_free(soc,
331 msdu_info->tx_queue.desc_pool_id,
332 free_seg);
333 free_seg = next_seg;
334 }
335}
336
337/**
338 * dp_tx_free_tso_num_seg() - Loop through the tso num segments
339 * allocated and free them
340 *
341 * @soc: soc handle
342 * @free_seg: list of tso segments
343 * @msdu_info: msdu descriptor
344 * Return - void
345 */
346static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
347 struct qdf_tso_num_seg_elem_t *free_seg,
348 struct dp_tx_msdu_info_s *msdu_info)
349{
350 struct qdf_tso_num_seg_elem_t *next_seg;
351
352 while (free_seg) {
353 next_seg = free_seg->next;
354 dp_tso_num_seg_free(soc,
355 msdu_info->tx_queue.desc_pool_id,
356 free_seg);
357 free_seg = next_seg;
358 }
359}
360
Ishank Jain5122f8f2017-03-15 22:22:47 +0530361/**
362 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
363 * @vdev: virtual device handle
364 * @msdu: network buffer
365 * @msdu_info: meta data associated with the msdu
366 *
367 * Return: QDF_STATUS_SUCCESS success
368 */
Ishank Jain5122f8f2017-03-15 22:22:47 +0530369static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
370 qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
371{
372 struct qdf_tso_seg_elem_t *tso_seg;
373 int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
374 struct dp_soc *soc = vdev->pdev->soc;
375 struct qdf_tso_info_t *tso_info;
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700376 struct qdf_tso_num_seg_elem_t *tso_num_seg;
Ishank Jain5122f8f2017-03-15 22:22:47 +0530377
378 tso_info = &msdu_info->u.tso_info;
379 tso_info->curr_seg = NULL;
380 tso_info->tso_seg_list = NULL;
381 tso_info->num_segs = num_seg;
382 msdu_info->frm_type = dp_tx_frm_tso;
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700383 tso_info->tso_num_seg_list = NULL;
384
385 TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530386
387 while (num_seg) {
388 tso_seg = dp_tx_tso_desc_alloc(
389 soc, msdu_info->tx_queue.desc_pool_id);
390 if (tso_seg) {
391 tso_seg->next = tso_info->tso_seg_list;
392 tso_info->tso_seg_list = tso_seg;
393 num_seg--;
394 } else {
Ishank Jain5122f8f2017-03-15 22:22:47 +0530395 struct qdf_tso_seg_elem_t *free_seg =
396 tso_info->tso_seg_list;
397
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700398 dp_tx_free_tso_seg(soc, free_seg, msdu_info);
399
Ishank Jain5122f8f2017-03-15 22:22:47 +0530400 return QDF_STATUS_E_NOMEM;
401 }
402 }
403
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700404 TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
405
406 tso_num_seg = dp_tso_num_seg_alloc(soc,
407 msdu_info->tx_queue.desc_pool_id);
408
409 if (tso_num_seg) {
410 tso_num_seg->next = tso_info->tso_num_seg_list;
411 tso_info->tso_num_seg_list = tso_num_seg;
412 } else {
413 /* Bug: free tso_num_seg and tso_seg */
414 /* Free the already allocated num of segments */
415 struct qdf_tso_seg_elem_t *free_seg =
416 tso_info->tso_seg_list;
417
418 TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
419 __func__);
420 dp_tx_free_tso_seg(soc, free_seg, msdu_info);
421
422 return QDF_STATUS_E_NOMEM;
423 }
424
Ishank Jain5122f8f2017-03-15 22:22:47 +0530425 msdu_info->num_seg =
426 qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
427
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700428 TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
429 msdu_info->num_seg);
430
431 if (!(msdu_info->num_seg)) {
432 dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
433 dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
434 msdu_info);
435 return QDF_STATUS_E_INVAL;
436 }
437
Ishank Jain5122f8f2017-03-15 22:22:47 +0530438 tso_info->curr_seg = tso_info->tso_seg_list;
439
440 return QDF_STATUS_SUCCESS;
441}
442#else
443static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
444 qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
445{
446 return QDF_STATUS_E_NOMEM;
447}
448#endif
449
450/**
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530451 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
452 * @vdev: DP Vdev handle
453 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
454 * @desc_pool_id: Descriptor Pool ID
455 *
456 * Return:
457 */
Jeff Johnson755f2612017-01-05 16:28:13 -0800458static
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530459struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
460 struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
461{
462 uint8_t i;
Ishank Jain2f81e962017-01-23 22:42:37 +0530463 uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530464 struct dp_tx_seg_info_s *seg_info;
465 struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
466 struct dp_soc *soc = vdev->pdev->soc;
467
468 /* Allocate an extension descriptor */
469 msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
Ishank Jain2f81e962017-01-23 22:42:37 +0530470 qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530471
Ishank Jain57c42a12017-04-12 10:42:22 +0530472 if (!msdu_ext_desc) {
473 DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530474 return NULL;
Ishank Jain57c42a12017-04-12 10:42:22 +0530475 }
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530476
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +0530477 if (msdu_info->exception_fw &&
478 qdf_unlikely(vdev->mesh_vdev)) {
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +0530479 qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
480 &msdu_info->meta_data[0],
481 sizeof(struct htt_tx_msdu_desc_ext2_t));
482 qdf_atomic_inc(&vdev->pdev->num_tx_exception);
Venkateswara Swamy Bandaru97ca7e92018-04-27 18:30:10 +0530483 }
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +0530484
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530485 switch (msdu_info->frm_type) {
486 case dp_tx_frm_sg:
487 case dp_tx_frm_me:
488 case dp_tx_frm_raw:
489 seg_info = msdu_info->u.sg_info.curr_seg;
490 /* Update the buffer pointers in MSDU Extension Descriptor */
491 for (i = 0; i < seg_info->frag_cnt; i++) {
492 hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
493 seg_info->frags[i].paddr_lo,
494 seg_info->frags[i].paddr_hi,
495 seg_info->frags[i].len);
496 }
497
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530498 break;
499
500 case dp_tx_frm_tso:
Ishank Jain5122f8f2017-03-15 22:22:47 +0530501 dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
502 &cached_ext_desc[0]);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530503 break;
504
Ishank Jain5122f8f2017-03-15 22:22:47 +0530505
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530506 default:
507 break;
508 }
509
Yun Park11d46e02017-11-27 10:51:53 -0800510 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
511 cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530512
513 hal_tx_ext_desc_sync(&cached_ext_desc[0],
514 msdu_ext_desc->vaddr);
515
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530516 return msdu_ext_desc;
517}
518
519/**
520 * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor
521 * @vdev: DP vdev handle
522 * @nbuf: skb
523 * @desc_pool_id: Descriptor pool ID
Prathyusha Guduribe41d972018-01-19 14:17:14 +0530524 * @meta_data: Metadata to the fw
525 * @tx_exc_metadata: Handle that holds exception path metadata
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530526 * Allocate and prepare Tx descriptor with msdu information.
527 *
528 * Return: Pointer to Tx Descriptor on success,
529 * NULL on failure
530 */
Jeff Johnson755f2612017-01-05 16:28:13 -0800531static
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530532struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +0530533 qdf_nbuf_t nbuf, uint8_t desc_pool_id,
534 struct dp_tx_msdu_info_s *msdu_info,
Prathyusha Guduribe41d972018-01-19 14:17:14 +0530535 struct cdp_tx_exception_metadata *tx_exc_metadata)
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530536{
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530537 uint8_t align_pad;
Dhanashri Atre10a93232016-11-11 18:47:05 -0800538 uint8_t is_exception = 0;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530539 uint8_t htt_hdr_size;
540 struct ether_header *eh;
541 struct dp_tx_desc_s *tx_desc;
542 struct dp_pdev *pdev = vdev->pdev;
543 struct dp_soc *soc = pdev->soc;
544
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530545 /* Allocate software Tx descriptor */
546 tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530547 if (qdf_unlikely(!tx_desc)) {
Ishank Jain57c42a12017-04-12 10:42:22 +0530548 DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530549 return NULL;
550 }
551
552 /* Flow control/Congestion Control counters */
Vijay Pamidipati4d5d4362017-02-09 22:49:00 +0530553 qdf_atomic_inc(&pdev->num_tx_outstanding);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530554
555 /* Initialize the SW tx descriptor */
556 tx_desc->nbuf = nbuf;
557 tx_desc->frm_type = dp_tx_frm_std;
Prathyusha Guduribe41d972018-01-19 14:17:14 +0530558 tx_desc->tx_encap_type = (tx_exc_metadata ?
559 tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530560 tx_desc->vdev = vdev;
Ravi Joshiab33d9b2017-02-11 21:43:28 -0800561 tx_desc->pdev = pdev;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530562 tx_desc->msdu_ext_desc = NULL;
Pamidipati, Vijay871850e2017-11-05 16:18:25 +0530563 tx_desc->pkt_offset = 0;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530564
565 /*
566 * For special modes (vdev_type == ocb or mesh), data frames should be
567 * transmitted using varying transmit parameters (tx spec) which include
568 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
569 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
570 * These frames are sent as exception packets to firmware.
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530571 *
Pamidipati, Vijay871850e2017-11-05 16:18:25 +0530572 * HW requirement is that metadata should always point to a
573 * 8-byte aligned address. So we add alignment pad to start of buffer.
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530574 * HTT Metadata should be ensured to be multiple of 8-bytes,
Pamidipati, Vijay871850e2017-11-05 16:18:25 +0530575 * to get 8-byte aligned start address along with align_pad added
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530576 *
577 * |-----------------------------|
578 * | |
579 * |-----------------------------| <-----Buffer Pointer Address given
580 * | | ^ in HW descriptor (aligned)
581 * | HTT Metadata | |
582 * | | |
583 * | | | Packet Offset given in descriptor
584 * | | |
585 * |-----------------------------| |
586 * | Alignment Pad | v
587 * |-----------------------------| <----- Actual buffer start address
588 * | SKB Data | (Unaligned)
589 * | |
590 * | |
591 * | |
592 * | |
593 * | |
594 * |-----------------------------|
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530595 */
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +0530596 if (qdf_unlikely((msdu_info->exception_fw)) ||
597 (vdev->opmode == wlan_op_mode_ocb)) {
Pamidipati, Vijay871850e2017-11-05 16:18:25 +0530598 align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
599 if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
600 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
601 "qdf_nbuf_push_head failed\n");
602 goto failure;
603 }
604
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530605 htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +0530606 msdu_info->meta_data);
Venkateswara Swamy Bandaru6d840bc2017-07-10 15:35:28 +0530607 if (htt_hdr_size == 0)
608 goto failure;
Pamidipati, Vijay871850e2017-11-05 16:18:25 +0530609 tx_desc->pkt_offset = align_pad + htt_hdr_size;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530610 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
Dhanashri Atre10a93232016-11-11 18:47:05 -0800611 is_exception = 1;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530612 }
613
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530614 if (qdf_unlikely(QDF_STATUS_SUCCESS !=
615 qdf_nbuf_map(soc->osdev, nbuf,
616 QDF_DMA_TO_DEVICE))) {
617 /* Handle failure */
618 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
619 "qdf_nbuf_map failed\n");
Ishank Jain57c42a12017-04-12 10:42:22 +0530620 DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
Pamidipati, Vijay8a4e27c2017-04-06 01:04:08 +0530621 goto failure;
622 }
623
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530624 if (qdf_unlikely(vdev->nawds_enabled)) {
625 eh = (struct ether_header *) qdf_nbuf_data(nbuf);
626 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
627 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
Dhanashri Atre10a93232016-11-11 18:47:05 -0800628 is_exception = 1;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530629 }
630 }
631
Dhanashri Atre10a93232016-11-11 18:47:05 -0800632#if !TQM_BYPASS_WAR
Prathyusha Guduribe41d972018-01-19 14:17:14 +0530633 if (is_exception || tx_exc_metadata)
Dhanashri Atre10a93232016-11-11 18:47:05 -0800634#endif
635 {
636 /* Temporary WAR due to TQM VP issues */
637 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
Vijay Pamidipati4d5d4362017-02-09 22:49:00 +0530638 qdf_atomic_inc(&pdev->num_tx_exception);
Dhanashri Atre10a93232016-11-11 18:47:05 -0800639 }
640
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530641 return tx_desc;
642
643failure:
Ravi Joshiab33d9b2017-02-11 21:43:28 -0800644 dp_tx_desc_release(tx_desc, desc_pool_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530645 return NULL;
646}
647
648/**
Ishank Jain5122f8f2017-03-15 22:22:47 +0530649 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530650 * @vdev: DP vdev handle
651 * @nbuf: skb
652 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
653 * @desc_pool_id : Descriptor Pool ID
654 *
655 * Allocate and prepare Tx descriptor with msdu and fragment descritor
656 * information. For frames wth fragments, allocate and prepare
657 * an MSDU extension descriptor
658 *
659 * Return: Pointer to Tx Descriptor on success,
660 * NULL on failure
661 */
Jeff Johnson755f2612017-01-05 16:28:13 -0800662static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530663 qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
664 uint8_t desc_pool_id)
665{
666 struct dp_tx_desc_s *tx_desc;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530667 struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
668 struct dp_pdev *pdev = vdev->pdev;
669 struct dp_soc *soc = pdev->soc;
670
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530671 /* Allocate software Tx descriptor */
672 tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
Ishank Jain57c42a12017-04-12 10:42:22 +0530673 if (!tx_desc) {
674 DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530675 return NULL;
Ishank Jain57c42a12017-04-12 10:42:22 +0530676 }
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530677
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530678 /* Flow control/Congestion Control counters */
Vijay Pamidipati4d5d4362017-02-09 22:49:00 +0530679 qdf_atomic_inc(&pdev->num_tx_outstanding);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530680
681 /* Initialize the SW tx descriptor */
682 tx_desc->nbuf = nbuf;
683 tx_desc->frm_type = msdu_info->frm_type;
684 tx_desc->tx_encap_type = vdev->tx_encap_type;
685 tx_desc->vdev = vdev;
Ravi Joshiab33d9b2017-02-11 21:43:28 -0800686 tx_desc->pdev = pdev;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530687 tx_desc->pkt_offset = 0;
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700688 tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
689 tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530690
691 /* Handle scattered frames - TSO/SG/ME */
692 /* Allocate and prepare an extension descriptor for scattered frames */
693 msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
694 if (!msdu_ext_desc) {
695 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
696 "%s Tx Extension Descriptor Alloc Fail\n",
697 __func__);
698 goto failure;
699 }
700
Dhanashri Atre10a93232016-11-11 18:47:05 -0800701#if TQM_BYPASS_WAR
702 /* Temporary WAR due to TQM VP issues */
703 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
Vijay Pamidipati4d5d4362017-02-09 22:49:00 +0530704 qdf_atomic_inc(&pdev->num_tx_exception);
Dhanashri Atre10a93232016-11-11 18:47:05 -0800705#endif
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +0530706 if (qdf_unlikely(msdu_info->exception_fw))
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +0530707 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
Dhanashri Atre10a93232016-11-11 18:47:05 -0800708
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530709 tx_desc->msdu_ext_desc = msdu_ext_desc;
710 tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
711
712 return tx_desc;
713failure:
Ravi Joshiab33d9b2017-02-11 21:43:28 -0800714 dp_tx_desc_release(tx_desc, desc_pool_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +0530715 return NULL;
716}
717
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer (head of an nbuf chain; one chain element per
 *	  buffer fragment of the raw frame)
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *	descriptor
 *
 * DMA-maps every nbuf in the chain and records each buffer's physical
 * address/length into @seg_info as one fragment. On success the whole
 * chain is described as a single SG segment (frm_type = dp_tx_frm_raw).
 *
 * Return: @nbuf on success; NULL on DMA-map failure (in that case every
 *	   already-mapped buffer is unmapped and the entire chain is freed,
 *	   so the caller must not touch @nbuf again)
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	/* number of chain elements successfully mapped before a failure;
	 * only those are unmapped on the error path */
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	/* Walk the nbuf chain; each element becomes one fragment entry.
	 * NOTE(review): i indexes seg_info->frags[] with no bound check
	 * against the frags array size — confirm callers cap the chain
	 * length, or add an explicit limit. */
	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {

		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
					QDF_DMA_TO_DEVICE)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s dma map error \n", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			mapped_buf_num = i;
			goto error;
		}

		/* Split the 64-bit DMA address into the lo/hi halves the
		 * fragment descriptor expects */
		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *) curr_nbuf;
		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	/* Unmap the buffers that were mapped before the failure, then free
	 * the whole chain (including never-mapped tail elements) */
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;

}
791
/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *	(may be NULL; then the vdev's security type is used)
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor. The descriptor is first built in a local
 * cached copy and then synced to the HW ring slot in one shot.
 *
 * Return: QDF_STATUS_SUCCESS on enqueue;
 *	   QDF_STATUS_E_RESOURCES when the TCL ring is full (caller must
 *	   release the SW descriptor)
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
		struct dp_tx_desc_s *tx_desc, uint8_t tid,
		uint16_t fw_metadata, uint8_t ring_id,
		struct cdp_tx_exception_metadata
			*tx_exc_metadata)
{
	uint8_t type;
	uint16_t length;
	void *hal_tx_desc, *hal_tx_desc_cached;
	qdf_dma_addr_t dma_addr;
	/* Stack-local staging buffer for the HW descriptor image */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

	/* Exception-path frames may carry their own security type */
	enum cdp_sec_type sec_type = (tx_exc_metadata ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = ring_id;
	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;

	hal_tx_desc_cached = (void *) cached_desc;
	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

	/* Fragmented (TSO/SG/raw) frames point HW at the MSDU extension
	 * descriptor; simple frames point directly at the mapped buffer.
	 * NOTE(review): in the FRAG case 'length' is the ext-desc constant,
	 * not the packet byte count, so the tx_i.processed byte stat below
	 * is not the on-air length for those frames — confirm intended. */
	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		length = HAL_TX_EXT_DESC_WITH_META_DATA;
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		dma_addr = tx_desc->msdu_ext_desc->paddr;
	} else {
		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	/* Program the cached descriptor fields */
	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
			dma_addr , bm_id, tx_desc->id, type);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_lmac_id(hal_tx_desc_cached,
			HAL_TX_DESC_DEFAULT_LMAC_ID);
	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
			vdev->dscp_tid_map_id);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
			sec_type_map[sec_type]);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			__func__, length, type, (uint64_t)dma_addr,
			tx_desc->pkt_offset, tx_desc->id);

	/* Route to FW instead of direct HW transmit when flagged */
	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
			vdev->hal_desc_addr_search_flags);

	/* verify checksum offload configuration*/
	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);


	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

	if (!hal_tx_desc) {
		/* Ring full: nothing was handed to HW; report resources */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return QDF_STATUS_E_RESOURCES;
	}

	/* Mark queued BEFORE the sync so completion processing sees it */
	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

	/*
	 * If one packet is enqueued in HW, PM usage count needs to be
	 * incremented by one to prevent future runtime suspend. This
	 * should be tied with the success of enqueuing. It will be
	 * decremented after the packet has been sent.
	 */
	hif_pm_runtime_get_noresume(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}
904
Ruchi, Agrawal34721392017-11-13 18:02:09 +0530905
/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Software substitute for the HW Common Classification Engine: decides
 * whether a frame belongs to a "special" class (EAPOL/ARP/WAPI/TDLS/DHCP)
 * that must be steered to FW. For LLC-SNAP and 802.1Q frames a temporary
 * clone is created so the L2 encapsulation can be stripped before running
 * the qdf packet-type checks; the clone is always freed before returning.
 *
 * Return: bool( true if classified,
 * else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return false;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		/* Ethernet-encapsulated frame */
		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
					sizeof(struct ether_header));
	} else {
		/* Raw 802.11 frame: only SNAP-encapsulated EAPOL inside a
		 * QoS data frame is classified; everything else is not */
		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			/* 4-address (WDS) vs 3-address header changes where
			 * the LLC header and ethertype live */
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {

				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
					QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
				&& (ether_type ==
				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {

				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}

	/* Strip LLC-SNAP and/or VLAN headers on a clone so the
	 * qdf_nbuf_is_* checks below see a plain ethertype payload.
	 * If cloning fails the checks run on the unstripped original. */
	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
				sizeof(*llcHdr));
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_8021Q)) {
				qdf_nbuf_pull_head(nbuf_clone,
						sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_8021Q)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
					sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	/* From here on operate on the stripped clone when one exists */
	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;


	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone != NULL))
			qdf_nbuf_free(nbuf_clone);
		return true;
	}

	if (qdf_unlikely(nbuf_clone != NULL))
		qdf_nbuf_free(nbuf_clone);

	return false;
}
1011
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301012/**
1013 * dp_tx_classify_tid() - Obtain TID to be used for this frame
1014 * @vdev: DP vdev handle
1015 * @nbuf: skb
1016 *
1017 * Extract the DSCP or PCP information from frame and map into TID value.
1018 * Software based TID classification is required when more than 2 DSCP-TID
1019 * mapping tables are needed.
Ishank Jain949674c2017-02-27 17:09:29 +05301020 * Hardware supports 2 DSCP-TID mapping tables
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301021 *
Ishank Jain949674c2017-02-27 17:09:29 +05301022 * Return: void
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301023 */
Ishank Jain949674c2017-02-27 17:09:29 +05301024static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1025 struct dp_tx_msdu_info_s *msdu_info)
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301026{
Ishank Jain949674c2017-02-27 17:09:29 +05301027 uint8_t tos = 0, dscp_tid_override = 0;
1028 uint8_t *hdr_ptr, *L3datap;
1029 uint8_t is_mcast = 0;
1030 struct ether_header *eh = NULL;
1031 qdf_ethervlan_header_t *evh = NULL;
1032 uint16_t ether_type;
1033 qdf_llc_t *llcHdr;
1034 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1035
Neil Zhao48876362018-03-22 11:23:02 -07001036 DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1037
1038 if (vdev->dscp_tid_map_id <= 1)
1039 return;
1040
Ishank Jain949674c2017-02-27 17:09:29 +05301041 /* for mesh packets don't do any classification */
1042 if (qdf_unlikely(vdev->mesh_vdev))
1043 return;
1044
1045 if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1046 eh = (struct ether_header *) nbuf->data;
1047 hdr_ptr = eh->ether_dhost;
1048 L3datap = hdr_ptr + sizeof(struct ether_header);
1049 } else {
1050 qdf_dot3_qosframe_t *qos_wh =
1051 (qdf_dot3_qosframe_t *) nbuf->data;
1052 msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1053 qos_wh->i_qos[0] & DP_QOS_TID : 0;
1054 return;
1055 }
1056
1057 is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1058 ether_type = eh->ether_type;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05301059
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +05301060 llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
Ishank Jain949674c2017-02-27 17:09:29 +05301061 /*
1062 * Check if packet is dot3 or eth2 type.
1063 */
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +05301064 if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
Ishank Jain949674c2017-02-27 17:09:29 +05301065 ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1066 sizeof(*llcHdr));
1067
1068 if (ether_type == htons(ETHERTYPE_8021Q)) {
1069 L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1070 sizeof(*llcHdr);
1071 ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1072 + sizeof(*llcHdr) +
1073 sizeof(qdf_net_vlanhdr_t));
1074 } else {
1075 L3datap = hdr_ptr + sizeof(struct ether_header) +
1076 sizeof(*llcHdr);
1077 }
Ishank Jain949674c2017-02-27 17:09:29 +05301078 } else {
1079 if (ether_type == htons(ETHERTYPE_8021Q)) {
1080 evh = (qdf_ethervlan_header_t *) eh;
1081 ether_type = evh->ether_type;
1082 L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1083 }
1084 }
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05301085
Ishank Jain949674c2017-02-27 17:09:29 +05301086 /*
1087 * Find priority from IP TOS DSCP field
1088 */
1089 if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1090 qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1091 if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1092 /* Only for unicast frames */
1093 if (!is_mcast) {
1094 /* send it on VO queue */
1095 msdu_info->tid = DP_VO_TID;
1096 }
1097 } else {
1098 /*
1099 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1100 * from TOS byte.
1101 */
1102 tos = ip->ip_tos;
1103 dscp_tid_override = 1;
1104
1105 }
1106 } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1107 /* TODO
1108 * use flowlabel
1109 *igmpmld cases to be handled in phase 2
1110 */
1111 unsigned long ver_pri_flowlabel;
1112 unsigned long pri;
1113 ver_pri_flowlabel = *(unsigned long *) L3datap;
1114 pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1115 DP_IPV6_PRIORITY_SHIFT;
1116 tos = pri;
1117 dscp_tid_override = 1;
1118 } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1119 msdu_info->tid = DP_VO_TID;
1120 else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1121 /* Only for unicast frames */
1122 if (!is_mcast) {
1123 /* send ucast arp on VO queue */
1124 msdu_info->tid = DP_VO_TID;
1125 }
1126 }
1127
1128 /*
1129 * Assign all MCAST packets to BE
1130 */
1131 if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1132 if (is_mcast) {
1133 tos = 0;
1134 dscp_tid_override = 1;
1135 }
1136 }
1137
1138 if (dscp_tid_override == 1) {
1139 tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1140 msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1141 }
1142 return;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301143}
1144
Kabilan Kannan60e3b302017-09-07 20:06:17 -07001145#ifdef CONVERGED_TDLS_ENABLE
1146/**
1147 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1148 * @tx_desc: TX descriptor
1149 *
1150 * Return: None
1151 */
1152static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1153{
1154 if (tx_desc->vdev) {
1155 if (tx_desc->vdev->is_tdls_frame)
1156 tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1157 tx_desc->vdev->is_tdls_frame = false;
1158 }
1159}
1160
1161/**
1162 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1163 * @tx_desc: TX descriptor
1164 * @vdev: datapath vdev handle
1165 *
1166 * Return: None
1167 */
1168static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1169 struct dp_vdev *vdev)
1170{
1171 struct hal_tx_completion_status ts = {0};
1172 qdf_nbuf_t nbuf = tx_desc->nbuf;
1173
1174 hal_tx_comp_get_status(&tx_desc->comp, &ts);
1175 if (vdev->tx_non_std_data_callback.func) {
1176 qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1177 vdev->tx_non_std_data_callback.func(
1178 vdev->tx_non_std_data_callback.ctxt,
1179 nbuf, ts.status);
1180 return;
1181 }
1182}
1183#endif
1184
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301185/**
1186 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1187 * @vdev: DP vdev handle
1188 * @nbuf: skb
1189 * @tid: TID from HLOS for overriding default DSCP-TID mapping
Prathyusha Guduribe41d972018-01-19 14:17:14 +05301190 * @meta_data: Metadata to the fw
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301191 * @tx_q: Tx queue to be used for this Tx frame
Ishank Jain9f174c62017-03-30 18:37:42 +05301192 * @peer_id: peer_id of the peer in case of NAWDS frames
Prathyusha Guduribe41d972018-01-19 14:17:14 +05301193 * @tx_exc_metadata: Handle that holds exception path metadata
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301194 *
1195 * Return: NULL on success,
1196 * nbuf when it fails to send
1197 */
Jeff Johnson755f2612017-01-05 16:28:13 -08001198static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05301199 struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
Prathyusha Guduribe41d972018-01-19 14:17:14 +05301200 struct cdp_tx_exception_metadata *tx_exc_metadata)
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301201{
1202 struct dp_pdev *pdev = vdev->pdev;
1203 struct dp_soc *soc = pdev->soc;
1204 struct dp_tx_desc_s *tx_desc;
1205 QDF_STATUS status;
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05301206 struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301207 void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
Ishank Jain9f174c62017-03-30 18:37:42 +05301208 uint16_t htt_tcl_metadata = 0;
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05301209 uint8_t tid = msdu_info->tid;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301210
1211 /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
Prathyusha Guduribe41d972018-01-19 14:17:14 +05301212 tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05301213 msdu_info, tx_exc_metadata);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301214 if (!tx_desc) {
1215 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001216 "%s Tx_desc prepare Fail vdev %pK queue %d\n",
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301217 __func__, vdev, tx_q->desc_pool_id);
Pamidipati, Vijayfc779602017-08-07 17:58:19 +05301218 return nbuf;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301219 }
1220
Ruchi, Agrawal34721392017-11-13 18:02:09 +05301221 if (qdf_unlikely(soc->cce_disable)) {
Ruchi, Agrawal4c1468f2017-12-08 00:04:33 +05301222 if (dp_cce_classify(vdev, nbuf) == true) {
Ruchi, Agrawal34721392017-11-13 18:02:09 +05301223 DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1224 tid = DP_VO_TID;
1225 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1226 }
1227 }
1228
Kabilan Kannan60e3b302017-09-07 20:06:17 -07001229 dp_tx_update_tdls_flags(tx_desc);
1230
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301231 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1232 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001233 "%s %d : HAL RING Access Failed -- %pK\n",
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301234 __func__, __LINE__, hal_srng);
Ishank Jain1e7401c2017-02-17 15:38:39 +05301235 DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
Venkata Sharath Chandra Manchala532cd5f2017-04-24 11:15:30 -07001236 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301237 goto fail_return;
1238 }
1239
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301240 if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1241 htt_tcl_metadata = vdev->htt_tcl_metadata;
1242 HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1243 } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
Ishank Jain9f174c62017-03-30 18:37:42 +05301244 HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1245 HTT_TCL_METADATA_TYPE_PEER_BASED);
1246 HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1247 peer_id);
1248 } else
1249 htt_tcl_metadata = vdev->htt_tcl_metadata;
1250
Venkateswara Swamy Bandaru97ca7e92018-04-27 18:30:10 +05301251
1252 if (msdu_info->exception_fw) {
1253 HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1254 }
1255
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301256 /* Enqueue the Tx MSDU descriptor to HW for transmit */
1257 status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
Prathyusha Guduribe41d972018-01-19 14:17:14 +05301258 htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301259
1260 if (status != QDF_STATUS_SUCCESS) {
1261 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001262 "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301263 __func__, tx_desc, tx_q->ring_id);
Ravi Joshiab33d9b2017-02-11 21:43:28 -08001264 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301265 goto fail_return;
1266 }
1267
Pamidipati, Vijayf82fb2b2017-06-28 05:31:50 +05301268 nbuf = NULL;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301269
1270fail_return:
Yue Ma245b47b2017-02-21 16:35:31 -08001271 if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1272 hal_srng_access_end(soc->hal_soc, hal_srng);
1273 hif_pm_runtime_put(soc->hif_handle);
1274 } else {
1275 hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1276 }
1277
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301278 return nbuf;
1279}
1280
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL.
 * Also used for SG and multicast-to-unicast (ME) frames, where each
 * segment in msdu_info->u.sg_info maps to one MSDU.
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
#if QDF_LOCK_STATS
static noinline
#else
static
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;

	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		return nbuf;
	}

	/* SW classification when the HW CCE is disabled; the result
	 * applies to every segment of this frame */
	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	/* ME frames: transmit the per-client converted buffer, not the
	 * original multicast nbuf */
	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU) , prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
				tx_q->desc_pool_id);

		if (!tx_desc) {
			/* Descriptor pool exhausted: for ME also release the
			 * converted buffer before bailing out */
			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
			htt_tcl_metadata, tx_q->ring_id, NULL);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
				  __func__, tx_desc, tx_q->ring_id);

			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the number of
				 * nbuf users for each additional segment of the msdu.
				 * This will ensure that the skb is freed only after
				 * receiving tx completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
				(msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}
		i++;
	}

	/* Every segment enqueued: report success */
	nbuf = NULL;

done:
	/* End ring access; while runtime-PM is suspended only reap the
	 * ring instead of touching the powered-down head pointer */
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}
1442
/**
 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
 *			for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Maps the linear part of @nbuf as fragment 0 and each page fragment as
 * fragments 1..nr_frags, then describes the whole buffer as one SG
 * segment (frm_type = dp_tx_frm_sg).
 *
 * Return: @nbuf on success,
 *	   NULL when a DMA mapping fails (nbuf is freed here).
 *	   NOTE(review): on a frag-map failure, the head mapping and any
 *	   earlier frag mappings are not unmapped before the free —
 *	   confirm whether an explicit unmap is required here.
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	/* Map the linear (head) portion of the buffer */
	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
				QDF_DMA_TO_DEVICE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"dma map error\n");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

		qdf_nbuf_free(nbuf);
		return NULL;
	}

	/* Fragment 0 describes the linear head; split the 64-bit DMA
	 * address into lo/hi halves */
	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *) nbuf;

	/* Map each page fragment into frags[1..nr_frags] */
	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"frag dma map error\n");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		/* presumably qdf_nbuf_frag_map() stores the just-mapped
		 * frag's address where index 0 retrieves it — verify
		 * against the qdf_nbuf implementation */
		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t) paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	/* Single segment covering the whole nbuf */
	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}
1509
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05301510#ifdef MESH_MODE_SUPPORT
1511
/**
 * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
 *				and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb; must start with a struct meta_hdr_s prepended by the mesh
 *        stack (it is stripped before return)
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             desc. On return, meta_data[] holds the HTT tx descriptor
 *             extension, tid is set, and exception_fw indicates whether
 *             the frame must be routed through FW.
 *
 * Return: NULL on failure (nbuf freed),
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	/* meta_data[] aliases the HTT per-MSDU extension descriptor */
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	/*
	 * Only frames tagged CB_FTYPE_MESH_TX_INFO (set by dp_tx_send_mesh)
	 * carry per-frame TX info for FW; everything else just has the
	 * meta header stripped and goes the normal HW path.
	 */
	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	/* Fixed-rate request: copy power/rate parameters from the meta hdr */
	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		/* Mark each copied field valid so FW honors it */
		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	/* Unencrypted frames are also excluded from address learning */
	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	/* Strip the mesh meta header so the frame starts at the 802.3 hdr */
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_pull_head failed\n");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	/*
	 * mhdr still points at the (now pulled-off) header bytes; the data
	 * is presumably intact since pull_head only advances the data
	 * pointer — NOTE(review): confirm against qdf_nbuf implementation.
	 */
	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
	else
		msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
			" tid %d to_fw %d\n",
			__func__, msdu_info->meta_data[0],
			msdu_info->meta_data[1],
			msdu_info->meta_data[2],
			msdu_info->meta_data[3],
			msdu_info->meta_data[4],
			msdu_info->meta_data[5],
			msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
1598#else
/*
 * Stub when MESH_MODE_SUPPORT is not compiled in: the frame is passed
 * through untouched and no msdu_info fields are modified.
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}
1605
1606#endif
1607
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301608#ifdef DP_FEATURE_NAWDS_TX
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301609/**
Ishank Jain9f174c62017-03-30 18:37:42 +05301610 * dp_tx_prepare_nawds(): Tramit NAWDS frames
1611 * @vdev: dp_vdev handle
1612 * @nbuf: skb
1613 * @tid: TID from HLOS for overriding default DSCP-TID mapping
1614 * @tx_q: Tx queue to be used for this Tx frame
1615 * @meta_data: Meta date for mesh
1616 * @peer_id: peer_id of the peer in case of NAWDS frames
1617 *
1618 * return: NULL on success nbuf on failure
1619 */
1620static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05301621 struct dp_tx_msdu_info_s *msdu_info)
Ishank Jain9f174c62017-03-30 18:37:42 +05301622{
1623 struct dp_peer *peer = NULL;
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301624 struct dp_soc *soc = vdev->pdev->soc;
1625 struct dp_ast_entry *ast_entry = NULL;
1626 struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1627 uint16_t peer_id = HTT_INVALID_PEER;
1628
1629 struct dp_peer *sa_peer = NULL;
Ishank Jain9f174c62017-03-30 18:37:42 +05301630 qdf_nbuf_t nbuf_copy;
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301631
1632 qdf_spin_lock_bh(&(soc->ast_lock));
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05301633 ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
1634
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301635 if (ast_entry)
1636 sa_peer = ast_entry->peer;
1637
1638 qdf_spin_unlock_bh(&(soc->ast_lock));
1639
Ishank Jain9f174c62017-03-30 18:37:42 +05301640 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1641 if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301642 (peer->nawds_enabled)) {
1643 if (sa_peer == peer) {
1644 QDF_TRACE(QDF_MODULE_ID_DP,
1645 QDF_TRACE_LEVEL_DEBUG,
1646 " %s: broadcast multicast packet",
1647 __func__);
1648 DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1649 continue;
1650 }
1651
Ishank Jain9f174c62017-03-30 18:37:42 +05301652 nbuf_copy = qdf_nbuf_copy(nbuf);
1653 if (!nbuf_copy) {
1654 QDF_TRACE(QDF_MODULE_ID_DP,
1655 QDF_TRACE_LEVEL_ERROR,
1656 "nbuf copy failed");
1657 }
1658
1659 peer_id = peer->peer_ids[0];
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05301660 nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1661 msdu_info, peer_id, NULL);
Ishank Jain9f174c62017-03-30 18:37:42 +05301662 if (nbuf_copy != NULL) {
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301663 qdf_nbuf_free(nbuf_copy);
1664 continue;
Ishank Jain9f174c62017-03-30 18:37:42 +05301665 }
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301666 DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1667 1, qdf_nbuf_len(nbuf));
Ishank Jain9f174c62017-03-30 18:37:42 +05301668 }
1669 }
1670 if (peer_id == HTT_INVALID_PEER)
1671 return nbuf;
1672
Ishank Jain9f174c62017-03-30 18:37:42 +05301673 return NULL;
1674}
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301675#endif
Ishank Jain9f174c62017-03-30 18:37:42 +05301676
1677/**
Prathyusha Guduribe41d972018-01-19 14:17:14 +05301678 * dp_check_exc_metadata() - Checks if parameters are valid
1679 * @tx_exc - holds all exception path parameters
1680 *
1681 * Returns true when all the parameters are valid else false
1682 *
1683 */
1684static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1685{
1686 if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1687 tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1688 tx_exc->sec_type > cdp_num_sec_types) {
1689 return false;
1690 }
1691
1692 return true;
1693}
1694
/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw.
 * Only simple linear frames are supported here: mesh, TSO, SG and raw
 * frames are rejected (the caller gets the nbuf back).
 *
 * Return: NULL on success,
 *         nbuf when it fails to send (caller retains ownership)
 */
qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
	struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct ether_header *eh = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	struct dp_tx_msdu_info_s msdu_info;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	/* TID comes from the caller, not DSCP classification */
	msdu_info.tid = tx_exc_metadata->tid;

	/* eh is only consulted below for the mcast-enhancement check */
	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s , skb %pM",
			__func__, nbuf->data);

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Mesh mode is not supported in exception path");
		goto fail;
	}

	/* TSO or SG */
	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "TSO and SG are not supported in exception path");

		goto fail;
	}

	/* RAW */
	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Raw frame is not supported in exception path");
		goto fail;
	}


	/* Mcast enhancement: not applied here — warn and send to FW as-is */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW\n");
		}
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports upto 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
			tx_exc_metadata->peer_id, tx_exc_metadata);

	return nbuf;

fail:
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"pkt send failed");
	return nbuf;
}
1794
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05301795/**
1796 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1797 * @vap_dev: DP vdev handle
1798 * @nbuf: skb
1799 *
1800 * Entry point for Core Tx layer (DP_TX) invoked from
1801 * hard_start_xmit in OSIF/HDD
1802 *
1803 * Return: NULL on success,
1804 * nbuf when it fails to send
1805 */
1806#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	/* set when the meta hdr requests no encryption on a secured vdev */
	uint8_t no_enc_frame = 0;

	/* Take exclusive ownership: the ftype/priority cb fields and the
	 * meta header will be modified below.
	 */
	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (nbuf_mesh == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_unshare failed\n");
		return nbuf;
	}
	nbuf = nbuf_mesh;

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	/*
	 * Frames carrying updated TX info (and not in the no-encrypt case)
	 * are additionally cloned to the FW exception path so the mesh
	 * stack's per-frame parameters are applied.
	 */
	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
			!no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (nbuf_clone == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_clone failed\n");
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		/* dp_tx_send returns NULL on success */
		if (!dp_tx_send(vap_dev, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else
			qdf_nbuf_free(nbuf_clone);
	}

	/* No-encrypt frames themselves go via the FW exception path */
	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(vap_dev, nbuf);
	if ((nbuf == NULL) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	return nbuf;
}
1859
1860#else
1861
/* Without MESH_MODE_SUPPORT, mesh transmit is just the regular TX path */
qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
{
	return dp_tx_send(vap_dev, nbuf);
}
1866
1867#endif
Prathyusha Guduribe41d972018-01-19 14:17:14 +05301868
/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases.
 *
 * Classifies the frame (mesh / TSO / SG / mcast-enhancement / raw /
 * simple linear) and dispatches it to the matching prepare+send path.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send (caller retains ownership)
 */
qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s seg_info;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint16_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf_mesh = NULL;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s , skb %pM",
			__func__, nbuf->data);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	/* Mesh frames: strip meta hdr, may override tid/exception_fw */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
								&msdu_info);
		if (nbuf_mesh == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"Extracting mesh metadata failed\n");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports upto 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 * Table 1 - Default DSCP-TID mapping table
	 * Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO , SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s TSO frame %pK\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
				qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		/*
		 * NOTE(review): dp_tx_prepare_sg may return NULL (map
		 * failure) yet nbuf is passed to qdf_nbuf_len below —
		 * confirm qdf_nbuf_len tolerates NULL or add a check.
		 */
		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			 "%s non-TSO SG frame %pK\n", __func__, vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion*/
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "%s Mcast frm for ME %pK\n", __func__, vdev);

			DP_STATS_INC_PKT(vdev,
					tx_i.mcast_en.mcast_pkt, 1,
					qdf_nbuf_len(nbuf));
			/* ME path consumed the frame: done */
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (nbuf == NULL)
			return NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			 "%s Raw frame %pK\n", __func__, vdev);

		goto send_multiple;

	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	return nbuf;
}
2023
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status : Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * The frame is duplicated to eligible peers (bss/proxy-arp/NAWDS, or per
 * WDS vendor-extension policy) and sent per-peer; the original nbuf is
 * either sent NAWDS or freed, and the descriptor is released.
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_peer *sa_peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = NULL;
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	vdev = tx_desc->vdev;
	soc = vdev->pdev->soc;

	qdf_assert(vdev);

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s Tx reinject path\n", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	/* Look up the source peer (by source MAC) under the AST lock so we
	 * can avoid reflecting a NAWDS multicast back to its sender.
	 */
	qdf_spin_lock_bh(&(soc->ast_lock));

	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

#ifdef WDS_VENDOR_EXTENSION
	/*
	 * NOTE(review): the branches below look swapped — wh (802.11 hdr)
	 * is read for the non-raw encap and eth_hdr for raw. Confirm the
	 * intended mapping of encap type to header layout.
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * the mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		/*
		 * NOTE(review): this frees nbuf, yet the nawds-disabled
		 * branch at the bottom frees nbuf again — confirm mesh
		 * vdevs cannot reach that qdf_nbuf_free (double free).
		 */
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 * send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 * send ucast on that peer only
			 */
			((peer->bss_peer && num_peers_3addr && is_mcast) ||
			 (peer->wds_enabled &&
			  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
			   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			((peer->bss_peer &&
			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
			 peer->nawds_enabled)) {
#endif
				peer_id = DP_INVALID_PEER;

				/* NAWDS peers get a real peer_id; skip the
				 * peer the frame came from.
				 */
				if (peer->nawds_enabled) {
					peer_id = peer->peer_ids[0];
					if (sa_peer == peer) {
						QDF_TRACE(
							QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_DEBUG,
							" %s: multicast packet",
							__func__);
						DP_STATS_INC(peer,
							tx.nawds_mcast_drop, 1);
						continue;
					}
				}

				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						FL("nbuf copy failed"));
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
						nbuf_copy,
						&msdu_info,
						peer_id,
						NULL);

				if (nbuf_copy) {
					/* send failed: reclaim the copy */
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				} else {
					if (peer_id != DP_INVALID_PEER)
						DP_STATS_INC_PKT(peer,
							tx.nawds_mcast,
							1, qdf_nbuf_len(nbuf));
				}
			}
		}
	}

	/* Original nbuf: send as vdev-level NAWDS mcast, else just free */
	if (vdev->nawds_enabled) {
		peer_id = DP_INVALID_PEER;

		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
					1, qdf_nbuf_len(nbuf));

		nbuf = dp_tx_send_msdu_single(vdev,
				nbuf,
				&msdu_info,
				peer_id, NULL);

		if (nbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("pkt send failed"));
			qdf_nbuf_free(nbuf);
		}
	} else
		qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
2193
2194/**
2195 * dp_tx_inspect_handler() - Tx Inspect Handler
2196 * @tx_desc: software descriptor head pointer
2197 * @status : Tx completion status from HTT descriptor
2198 *
2199 * Handles Tx frames sent back to Host for inspection
2200 * (ProxyARP)
2201 *
2202 * Return: none
2203 */
Jeff Johnson755f2612017-01-05 16:28:13 -08002204static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302205{
2206
2207 struct dp_soc *soc;
Ravi Joshiab33d9b2017-02-11 21:43:28 -08002208 struct dp_pdev *pdev = tx_desc->pdev;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302209
2210 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2211 "%s Tx inspect path\n",
2212 __func__);
2213
Ravi Joshiab33d9b2017-02-11 21:43:28 -08002214 qdf_assert(pdev);
2215
2216 soc = pdev->soc;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302217
Ishank Jain1e7401c2017-02-17 15:38:39 +05302218 DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302219 qdf_nbuf_len(tx_desc->nbuf));
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302220
Ravi Joshiab33d9b2017-02-11 21:43:28 -08002221 DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
Pamidipati, Vijayf82fb2b2017-06-28 05:31:50 +05302222 dp_tx_desc_release(tx_desc, tx_desc->pool_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302223}
2224
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302225#ifdef FEATURE_PERPKT_INFO
Ruchi, Agrawalc0f9c972018-02-02 11:24:05 +05302226/**
2227 * dp_get_completion_indication_for_stack() - send completion to stack
2228 * @soc : dp_soc handle
2229 * @pdev: dp_pdev handle
2230 * @peer_id: peer_id of the peer for which completion came
2231 * @ppdu_id: ppdu_id
2232 * @first_msdu: first msdu
2233 * @last_msdu: last msdu
2234 * @netbuf: Buffer pointer for free
2235 *
 * This function indicates whether the buffer needs to be sent to the
 * stack for freeing or not
 *
 * Return: QDF_STATUS_SUCCESS if the buffer was handed to the stack path,
 * else a QDF error status
*/
Radha krishna Simha Jiguru47876f62017-11-30 21:08:40 +05302239QDF_STATUS
Ruchi, Agrawalc0f9c972018-02-02 11:24:05 +05302240dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev,
Soumya Bhat20725572018-01-11 19:36:19 +05302241 uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2242 uint8_t last_msdu, qdf_nbuf_t netbuf)
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302243{
2244 struct tx_capture_hdr *ppdu_hdr;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302245 struct dp_peer *peer = NULL;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302246
Soumya Bhat7422db82017-12-15 13:48:53 +05302247 if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302248 return QDF_STATUS_E_NOSUPPORT;
2249
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302250 peer = (peer_id == HTT_INVALID_PEER) ? NULL :
2251 dp_peer_find_by_id(soc, peer_id);
2252
2253 if (!peer) {
2254 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2255 FL("Peer Invalid"));
2256 return QDF_STATUS_E_INVAL;
2257 }
2258
Soumya Bhat7422db82017-12-15 13:48:53 +05302259 if (pdev->mcopy_mode) {
Soumya Bhat2f54de22018-02-21 09:54:28 +05302260 if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2261 (pdev->m_copy_id.tx_peer_id == peer_id)) {
Soumya Bhat2f779b02017-10-24 13:10:44 +05302262 return QDF_STATUS_E_INVAL;
2263 }
2264
Soumya Bhat2f54de22018-02-21 09:54:28 +05302265 pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2266 pdev->m_copy_id.tx_peer_id = peer_id;
Soumya Bhat2f779b02017-10-24 13:10:44 +05302267 }
2268
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302269 if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2270 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2271 FL("No headroom"));
2272 return QDF_STATUS_E_NOMEM;
2273 }
2274
2275 ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
Soumya Bhat2f779b02017-10-24 13:10:44 +05302276 qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2277 IEEE80211_ADDR_LEN);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302278 ppdu_hdr->ppdu_id = ppdu_id;
2279 qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2280 IEEE80211_ADDR_LEN);
Soumya Bhat20725572018-01-11 19:36:19 +05302281 ppdu_hdr->peer_id = peer_id;
2282 ppdu_hdr->first_msdu = first_msdu;
2283 ppdu_hdr->last_msdu = last_msdu;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302284
Ruchi, Agrawalc0f9c972018-02-02 11:24:05 +05302285 return QDF_STATUS_SUCCESS;
2286}
2287
2288
2289/**
2290 * dp_send_completion_to_stack() - send completion to stack
2291 * @soc : dp_soc handle
2292 * @pdev: dp_pdev handle
2293 * @peer_id: peer_id of the peer for which completion came
2294 * @ppdu_id: ppdu_id
2295 * @netbuf: Buffer pointer for free
2296 *
 * This function is used to send a tx completion event, carrying the
 * buffer to be freed, up to the stack
 *
 * Return: none
*/
2300void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
2301 uint16_t peer_id, uint32_t ppdu_id,
2302 qdf_nbuf_t netbuf)
2303{
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302304 dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
Soumya Bhat2f779b02017-10-24 13:10:44 +05302305 netbuf, peer_id,
2306 WDI_NO_VAL, pdev->pdev_id);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302307}
2308#else
/* FEATURE_PERPKT_INFO disabled: per-packet completion indication to the
 * stack is not supported; callers fall back to the normal free path.
 */
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
		uint8_t last_msdu, qdf_nbuf_t netbuf)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* FEATURE_PERPKT_INFO disabled: nothing to deliver to the stack */
static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
{
}
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302322#endif
2323
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302324/**
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302325 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2326 * @soc: Soc handle
2327 * @desc: software Tx descriptor to be processed
2328 *
2329 * Return: none
2330 */
2331static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2332 struct dp_tx_desc_s *desc)
2333{
2334 struct dp_vdev *vdev = desc->vdev;
2335 qdf_nbuf_t nbuf = desc->nbuf;
2336
Kabilan Kannan60e3b302017-09-07 20:06:17 -07002337 /* If it is TDLS mgmt, don't unmap or free the frame */
2338 if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2339 return dp_non_std_tx_comp_free_buff(desc, vdev);
2340
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302341 /* 0 : MSDU buffer, 1 : MLE */
2342 if (desc->msdu_ext_desc) {
2343 /* TSO free */
2344 if (hal_tx_ext_desc_get_tso_enable(
2345 desc->msdu_ext_desc->vaddr)) {
2346 /* If remaining number of segment is 0
2347 * actual TSO may unmap and free */
chenguo94b76152018-01-24 19:39:23 +08002348 if (qdf_nbuf_get_users(nbuf) == 1)
2349 __qdf_nbuf_unmap_single(soc->osdev,
2350 nbuf,
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302351 QDF_DMA_TO_DEVICE);
chenguo94b76152018-01-24 19:39:23 +08002352
2353 qdf_nbuf_free(nbuf);
2354 return;
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302355 }
2356 }
2357
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302358 qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2359
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05302360 if (qdf_likely(!vdev->mesh_vdev))
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302361 qdf_nbuf_free(nbuf);
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05302362 else {
2363 if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2364 qdf_nbuf_free(nbuf);
2365 DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2366 } else
2367 vdev->osif_tx_free_ext((nbuf));
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302368 }
2369}
2370
2371/**
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05302372 * dp_tx_mec_handler() - Tx MEC Notify Handler
2373 * @vdev: pointer to dp dev handler
2374 * @status : Tx completion status from HTT descriptor
2375 *
2376 * Handles MEC notify event sent from fw to Host
2377 *
2378 * Return: none
2379 */
2380#ifdef FEATURE_WDS
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{

	struct dp_soc *soc;
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	struct dp_peer *peer;
	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;

	/* MEC (Multicast Echo Check) notifications only matter when WDS
	 * learning is enabled on this vdev */
	if (!vdev->wds_enabled)
		return;

	soc = vdev->pdev->soc;
	/* Pick the first peer on the vdev's peer list under the ref mutex.
	 * NOTE(review): the peer pointer is used after the lock is dropped;
	 * this relies on the peer not being deleted concurrently -- confirm
	 * against the peer teardown path.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	peer = TAILQ_FIRST(&vdev->peer_list);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("peer is NULL"));
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s Tx MEC Handler\n",
			__func__);

	/* The MAC address in the HTT status words starts at byte offset
	 * (DP_MAC_ADDR_LEN - 2) and is stored reversed; undo the reversal */
	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
			status[(DP_MAC_ADDR_LEN - 2) + i];

	/* Skip our own address: only add a MEC AST entry for foreign MACs */
	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
		dp_peer_add_ast(soc,
				peer,
				mac_addr,
				CDP_TXRX_AST_TYPE_MEC,
				flags);
}
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05302418#endif
2419
2420/**
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302421 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2422 * @tx_desc: software descriptor head pointer
2423 * @status : Tx completion status from HTT descriptor
2424 *
2425 * This function will process HTT Tx indication messages from Target
2426 *
2427 * Return: none
2428 */
Jeff Johnson755f2612017-01-05 16:28:13 -08002429static
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302430void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2431{
2432 uint8_t tx_status;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302433 struct dp_pdev *pdev;
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05302434 struct dp_vdev *vdev;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302435 struct dp_soc *soc;
2436 uint32_t *htt_status_word = (uint32_t *) status;
2437
Ravi Joshiab33d9b2017-02-11 21:43:28 -08002438 qdf_assert(tx_desc->pdev);
2439
2440 pdev = tx_desc->pdev;
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05302441 vdev = tx_desc->vdev;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302442 soc = pdev->soc;
2443
Pamidipati, Vijayf82fb2b2017-06-28 05:31:50 +05302444 tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302445
2446 switch (tx_status) {
2447 case HTT_TX_FW2WBM_TX_STATUS_OK:
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302448 case HTT_TX_FW2WBM_TX_STATUS_DROP:
2449 case HTT_TX_FW2WBM_TX_STATUS_TTL:
2450 {
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302451 dp_tx_comp_free_buf(soc, tx_desc);
Pamidipati, Vijayf82fb2b2017-06-28 05:31:50 +05302452 dp_tx_desc_release(tx_desc, tx_desc->pool_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302453 break;
2454 }
2455 case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
2456 {
2457 dp_tx_reinject_handler(tx_desc, status);
2458 break;
2459 }
2460 case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
2461 {
2462 dp_tx_inspect_handler(tx_desc, status);
2463 break;
2464 }
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05302465 case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
2466 {
2467 dp_tx_mec_handler(vdev, status);
2468 break;
2469 }
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302470 default:
Houston Hoffman41b912c2017-08-30 14:27:51 -07002471 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302472 "%s Invalid HTT tx_status %d\n",
2473 __func__, tx_status);
2474 break;
2475 }
2476}
2477
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05302478#ifdef MESH_MODE_SUPPORT
2479/**
2480 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2481 * in mesh meta header
2482 * @tx_desc: software descriptor head pointer
2483 * @ts: pointer to tx completion stats
2484 * Return: none
2485 */
2486static
2487void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2488 struct hal_tx_completion_status *ts)
2489{
2490 struct meta_hdr_s *mhdr;
2491 qdf_nbuf_t netbuf = tx_desc->nbuf;
2492
2493 if (!tx_desc->msdu_ext_desc) {
Venkateswara Swamy Bandaru6d840bc2017-07-10 15:35:28 +05302494 if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2495 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07002496 "netbuf %pK offset %d\n",
Venkateswara Swamy Bandaru6d840bc2017-07-10 15:35:28 +05302497 netbuf, tx_desc->pkt_offset);
2498 return;
2499 }
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05302500 }
Venkateswara Swamy Bandaru6d840bc2017-07-10 15:35:28 +05302501 if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2502 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07002503 "netbuf %pK offset %d\n", netbuf,
Venkateswara Swamy Bandaru6d840bc2017-07-10 15:35:28 +05302504 sizeof(struct meta_hdr_s));
2505 return;
2506 }
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05302507
2508 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2509 mhdr->rssi = ts->ack_frame_rssi;
Venkateswara Swamy Bandaru15c68da2017-05-18 11:54:20 +05302510 mhdr->channel = tx_desc->pdev->operating_channel;
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05302511}
2512
2513#else
/* Mesh-only feature: no-op when MESH_MODE_SUPPORT is not compiled in */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
}
2519
2520#endif
2521
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302522/**
2523 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2524 * @peer: Handle to DP peer
2525 * @ts: pointer to HAL Tx completion stats
2526 * @length: MSDU length
2527 *
2528 * Return: None
2529 */
2530static void dp_tx_update_peer_stats(struct dp_peer *peer,
2531 struct hal_tx_completion_status *ts, uint32_t length)
2532{
2533 struct dp_pdev *pdev = peer->vdev->pdev;
2534 struct dp_soc *soc = pdev->soc;
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302535 uint8_t mcs, pkt_type;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302536
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302537 mcs = ts->mcs;
2538 pkt_type = ts->pkt_type;
2539
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302540 if (!ts->release_src == HAL_TX_COMP_RELEASE_SOURCE_TQM)
2541 return;
2542
Pamidipati, Vijay87a93cf2018-02-01 22:21:26 +05302543 if (peer->bss_peer) {
2544 DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2545 DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2546 } else {
2547 if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
2548 DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2549 DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2550 }
2551 }
Venkata Sharath Chandra Manchala65812e62018-02-15 16:04:52 -08002552
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302553 DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2554 (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2555
2556 DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2557 (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2558
2559 DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2560 (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2561
2562 DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2563 (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2564
Venkata Sharath Chandra Manchala65812e62018-02-15 16:04:52 -08002565 DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2566 (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2567
2568 DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2569 (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2570
2571 DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2572 (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2573
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302574 if (!ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
2575 return;
Pranita Solankefc2ff392017-12-15 19:25:13 +05302576
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302577 DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
Pranita Solankefc2ff392017-12-15 19:25:13 +05302578
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302579 DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
Anish Nataraj50347012018-03-06 21:12:45 +05302580 DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302581
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302582 if (!(soc->process_tx_status))
2583 return;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302584
Pranita Solankeed0aba62018-01-12 19:14:31 +05302585 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302586 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2587 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2588 ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
Pranita Solankeed0aba62018-01-12 19:14:31 +05302589 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302590 ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2591 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2592 ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
Pranita Solankeed0aba62018-01-12 19:14:31 +05302593 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302594 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2595 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2596 ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
Pranita Solankeed0aba62018-01-12 19:14:31 +05302597 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302598 ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2599 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2600 ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
Pranita Solankeed0aba62018-01-12 19:14:31 +05302601 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05302602 ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2603 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2604 ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302605 DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2606 DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2607 DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2608 DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2609 DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302610 DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2611 DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05302612 DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2613
2614 if (soc->cdp_soc.ol_ops->update_dp_stats) {
2615 soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
2616 &peer->stats, ts->peer_id,
2617 UPDATE_PEER_STATS);
2618 }
2619}
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05302620
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302621/**
Vijay Pamidipati5bcfa312016-12-20 11:44:38 +05302622 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2623 * @tx_desc: software descriptor head pointer
Ishank Jain1e7401c2017-02-17 15:38:39 +05302624 * @length: packet length
Vijay Pamidipati5bcfa312016-12-20 11:44:38 +05302625 *
Vijay Pamidipati5bcfa312016-12-20 11:44:38 +05302626 * Return: none
2627 */
static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
		uint32_t length)
{
	struct hal_tx_completion_status ts;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = tx_desc->vdev;
	struct dp_peer *peer = NULL;
	/* Ethernet header of the completed frame; only dereferenced after
	 * the encap-type check below */
	struct ether_header *eh =
		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);

	/* Decode the HW completion words cached in the descriptor */
	hal_tx_comp_get_status(&tx_desc->comp, &ts);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"-------------------- \n"
			"Tx Completion Stats: \n"
			"-------------------- \n"
			"ack_frame_rssi = %d \n"
			"first_msdu = %d \n"
			"last_msdu = %d \n"
			"msdu_part_of_amsdu = %d \n"
			"rate_stats valid = %d \n"
			"bw = %d \n"
			"pkt_type = %d \n"
			"stbc = %d \n"
			"ldpc = %d \n"
			"sgi = %d \n"
			"mcs = %d \n"
			"ofdma = %d \n"
			"tones_in_ru = %d \n"
			"tsf = %d \n"
			"ppdu_id = %d \n"
			"transmit_cnt = %d \n"
			"tid = %d \n"
			"peer_id = %d \n",
			ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
			ts.msdu_part_of_amsdu, ts.valid, ts.bw,
			ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
			ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
			ts.ppdu_id, ts.transmit_cnt, ts.tid,
			ts.peer_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid vdev");
		goto out;
	}

	soc = vdev->pdev->soc;

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));

	/* Update per-packet stats: mesh frames that were not routed to FW
	 * carry completion stats back to the OS in their meta header */
	if (qdf_unlikely(vdev->mesh_vdev) &&
			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);

	/* Update peer level stats */
	peer = dp_peer_find_by_id(soc, ts.peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid peer");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}

	/* Broadcast accounting is only valid for ethernet encap, where the
	 * destination address sits at the head of the frame */
	if (qdf_likely(peer->vdev->tx_encap_type ==
				htt_cmn_pkt_type_ethernet)) {
		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
	}

	dp_tx_update_peer_stats(peer, &ts, length);

out:
	return;
}
2706
2707/**
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302708 * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2709 * @soc: core txrx main context
2710 * @comp_head: software descriptor head pointer
2711 *
2712 * This function will process batch of descriptors reaped by dp_tx_comp_handler
2713 * and release the software descriptors after processing is complete
2714 *
2715 * Return: none
2716 */
Jeff Johnson755f2612017-01-05 16:28:13 -08002717static void dp_tx_comp_process_desc(struct dp_soc *soc,
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302718 struct dp_tx_desc_s *comp_head)
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302719{
2720 struct dp_tx_desc_s *desc;
2721 struct dp_tx_desc_s *next;
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302722 struct hal_tx_completion_status ts = {0};
2723 uint32_t length;
Ishank Jain1e7401c2017-02-17 15:38:39 +05302724 struct dp_peer *peer;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302725
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08002726 DP_HIST_INIT();
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302727 desc = comp_head;
2728
2729 while (desc) {
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302730 hal_tx_comp_get_status(&desc->comp, &ts);
Ishank Jain1e7401c2017-02-17 15:38:39 +05302731 peer = dp_peer_find_by_id(soc, ts.peer_id);
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302732 length = qdf_nbuf_len(desc->nbuf);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302733
Pranita Solankea12b4b32017-11-20 23:04:14 +05302734 dp_tx_comp_process_tx_status(desc, length);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302735
Ruchi, Agrawalc0f9c972018-02-02 11:24:05 +05302736 /*currently m_copy/tx_capture is not supported for scatter gather packets*/
2737 if (!(desc->msdu_ext_desc) && (dp_get_completion_indication_for_stack(soc,
2738 desc->pdev, ts.peer_id, ts.ppdu_id,
2739 ts.first_msdu, ts.last_msdu,
2740 desc->nbuf) == QDF_STATUS_SUCCESS)) {
2741 qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2742 QDF_DMA_TO_DEVICE);
2743
2744 dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id,
2745 ts.ppdu_id, desc->nbuf);
2746 } else {
2747 dp_tx_comp_free_buf(soc, desc);
2748 }
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302749
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08002750 DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
Pamidipati, Vijay4f7c3052017-07-25 10:01:00 +05302751
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302752 next = desc->next;
Ravi Joshiab33d9b2017-02-11 21:43:28 -08002753 dp_tx_desc_release(desc, desc->pool_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302754 desc = next;
2755 }
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08002756 DP_TX_HIST_STATS_PER_PDEV();
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302757}
2758
2759/**
2760 * dp_tx_comp_handler() - Tx completion handler
2761 * @soc: core txrx main context
2762 * @ring_id: completion ring id
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05302763 * @quota: No. of packets/descriptors that can be serviced in one loop
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302764 *
2765 * This function will collect hardware release ring element contents and
2766 * handle descriptor contents. Based on contents, free packet or handle error
2767 * conditions
2768 *
2769 * Return: none
2770 */
uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed;
	uint32_t count;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		return 0;
	}

	num_processed = 0;
	count = 0;

	/* Find head descriptor from completion ring */
	while (qdf_likely(tx_comp_hal_desc =
			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is not
		 * Tx completion indication, assert */
		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {

			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx comp release_src != TQM | FW");

			qdf_assert_always(0);
		}

		/* Get descriptor id: pool/page/offset are packed into the
		 * cookie returned by HW */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

		/* Pool ID is out of limit. Error */
		if (pool_id > wlan_cfg_get_num_tx_desc_pool(
					soc->wlan_cfg_ctx)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx Comp pool id %d not valid",
					pool_id);

			qdf_assert_always(0);
		}

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS,
				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS);

		/*
		 * If the release source is FW, process the HTT status
		 * inline; only TQM releases are chained for batched
		 * processing after the ring access ends.
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
					htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
					htt_tx_status);
		} else {
			/* Pool id is not matching. Error */
			if (tx_desc->pool_id != pool_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx Comp pool id %d not matched %d",
					pool_id, tx_desc->pool_id);

				qdf_assert_always(0);
			}

			/* Descriptor must be both allocated and queued to HW
			 * for this completion to be legitimate */
			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Txdesc invalid, flgs = %x,id = %d",
					tx_desc->flags, tx_desc_id);
				qdf_assert_always(0);
			}

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					&tx_desc->comp, 1);

		}

		/* NOTE(review): num_processed only advances when the low
		 * bits of count are clear, i.e. the quota is consumed at a
		 * divided rate (DP_TX_NAPI_BUDGET_DIV_MASK) -- confirm this
		 * matches the NAPI budget accounting intent */
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
		/* Decrement PM usage count if the packet has been sent.*/
		hif_pm_runtime_put(soc->hif_handle);

		/*
		 * Processed packet count is more than given quota
		 * stop to processing
		 */
		if ((num_processed >= quota))
			break;

		count++;
	}

	hal_srng_access_end(soc->hal_soc, hal_srng);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc(soc, head_desc);

	return num_processed;
}
2902
Kabilan Kannan78acc112017-10-10 16:16:32 -07002903#ifdef CONVERGED_TDLS_ENABLE
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302904/**
Kabilan Kannan60e3b302017-09-07 20:06:17 -07002905 * dp_tx_non_std() - Allow the control-path SW to send data frames
2906 *
2907 * @data_vdev - which vdev should transmit the tx data frames
2908 * @tx_spec - what non-standard handling to apply to the tx data frames
2909 * @msdu_list - NULL-terminated list of tx MSDUs
2910 *
2911 * Return: NULL on success,
2912 * nbuf when it fails to send
2913 */
2914qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
Kabilan Kannan78acc112017-10-10 16:16:32 -07002915 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
Kabilan Kannan60e3b302017-09-07 20:06:17 -07002916{
2917 struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
2918
2919 if (tx_spec & OL_TX_SPEC_NO_FREE)
2920 vdev->is_tdls_frame = true;
2921 return dp_tx_send(vdev_handle, msdu_list);
2922}
Kabilan Kannan78acc112017-10-10 16:16:32 -07002923#endif
Kabilan Kannan60e3b302017-09-07 20:06:17 -07002924
2925/**
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302926 * dp_tx_vdev_attach() - attach vdev to dp tx
2927 * @vdev: virtual device instance
2928 *
2929 * Return: QDF_STATUS_SUCCESS: success
2930 * QDF_STATUS_E_RESOURCES: Error return
2931 */
2932QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
2933{
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302934 /*
2935 * Fill HTT TCL Metadata with Vdev ID and MAC ID
2936 */
2937 HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
2938 HTT_TCL_METADATA_TYPE_VDEV_BASED);
2939
2940 HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
2941 vdev->vdev_id);
2942
2943 HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
Manoj Ekbote6f565862017-02-16 10:01:24 -08002944 DP_SW2HW_MACID(vdev->pdev->pdev_id));
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302945
2946 /*
2947 * Set HTT Extension Valid bit to 0 by default
2948 */
2949 HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
2950
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05302951 dp_tx_vdev_update_search_flags(vdev);
2952
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302953 return QDF_STATUS_SUCCESS;
2954}
2955
2956/**
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05302957 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
2958 * @vdev: virtual device instance
2959 *
2960 * Return: void
2961 *
2962 */
2963void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
2964{
2965 /*
Kabilan Kannan56bfd8f2017-04-26 13:26:47 -07002966 * Enable both AddrY (SA based search) and AddrX (Da based search)
2967 * for TDLS link
2968 *
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05302969 * Enable AddrY (SA based search) only for non-WDS STA and
2970 * ProxySTA VAP modes.
2971 *
2972 * In all other VAP modes, only DA based search should be
2973 * enabled
2974 */
Kabilan Kannan56bfd8f2017-04-26 13:26:47 -07002975 if (vdev->opmode == wlan_op_mode_sta &&
2976 vdev->tdls_link_connected)
2977 vdev->hal_desc_addr_search_flags =
2978 (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
2979 else if ((vdev->opmode == wlan_op_mode_sta &&
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05302980 (!vdev->wds_enabled || vdev->proxysta_vdev)))
2981 vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
2982 else
2983 vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
2984}
2985
Soumya Bhat33a88222018-03-21 14:47:00 +05302986#ifdef QCA_LL_TX_FLOW_CONTROL_V2
2987static void dp_tx_desc_flush(struct dp_vdev *vdev)
2988{
2989}
2990#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
2991
2992/* dp_tx_desc_flush() - release resources associated
2993 * to tx_desc
2994 * @vdev: virtual device instance
2995 *
2996 * This function will free all outstanding Tx buffers,
2997 * including ME buffer for which either free during
2998 * completion didn't happened or completion is not
2999 * received.
3000*/
3001static void dp_tx_desc_flush(struct dp_vdev *vdev)
3002{
3003 uint8_t i, num_pool;
3004 uint32_t j;
3005 uint32_t num_desc;
3006 struct dp_soc *soc = vdev->pdev->soc;
3007 struct dp_tx_desc_s *tx_desc = NULL;
3008 struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3009
3010 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3011 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3012
3013 for (i = 0; i < num_pool; i++) {
3014 for (j = 0; j < num_desc; j++) {
3015 tx_desc_pool = &((soc)->tx_desc[(i)]);
3016 if (tx_desc_pool &&
3017 tx_desc_pool->desc_pages.cacheable_pages) {
3018 tx_desc = dp_tx_desc_find(soc, i,
3019 (j & DP_TX_DESC_ID_PAGE_MASK) >>
3020 DP_TX_DESC_ID_PAGE_OS,
3021 (j & DP_TX_DESC_ID_OFFSET_MASK) >>
3022 DP_TX_DESC_ID_OFFSET_OS);
3023
3024 if (tx_desc && (tx_desc->vdev == vdev) &&
3025 (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3026 dp_tx_comp_free_buf(soc, tx_desc);
3027 dp_tx_desc_release(tx_desc, i);
3028 }
3029 }
3030 }
3031 }
3032}
3033#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3034
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05303035/**
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303036 * dp_tx_vdev_detach() - detach vdev from dp tx
3037 * @vdev: virtual device instance
3038 *
3039 * Return: QDF_STATUS_SUCCESS: success
3040 * QDF_STATUS_E_RESOURCES: Error return
3041 */
3042QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3043{
Soumya Bhat33a88222018-03-21 14:47:00 +05303044 dp_tx_desc_flush(vdev);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303045 return QDF_STATUS_SUCCESS;
3046}
3047
3048/**
3049 * dp_tx_pdev_attach() - attach pdev to dp tx
3050 * @pdev: physical device instance
3051 *
3052 * Return: QDF_STATUS_SUCCESS: success
3053 * QDF_STATUS_E_RESOURCES: Error return
3054 */
3055QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3056{
3057 struct dp_soc *soc = pdev->soc;
3058
3059 /* Initialize Flow control counters */
Vijay Pamidipati4d5d4362017-02-09 22:49:00 +05303060 qdf_atomic_init(&pdev->num_tx_exception);
3061 qdf_atomic_init(&pdev->num_tx_outstanding);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303062
3063 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3064 /* Initialize descriptors in TCL Ring */
3065 hal_tx_init_data_ring(soc->hal_soc,
3066 soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3067 }
3068
3069 return QDF_STATUS_SUCCESS;
3070}
3071
Soumya Bhat33a88222018-03-21 14:47:00 +05303072/**
3073 * dp_tx_pdev_detach() - detach pdev from dp tx
3074 * @pdev: physical device instance
3075 *
3076 * Return: QDF_STATUS_SUCCESS: success
3077 * QDF_STATUS_E_RESOURCES: Error return
3078 */
3079QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3080{
3081 dp_tx_me_exit(pdev);
3082 return QDF_STATUS_SUCCESS;
3083}
3084
Ruchi, Agrawale8eeb442018-02-12 16:19:58 +05303085#ifdef QCA_LL_TX_FLOW_CONTROL_V2
3086/* Pools will be allocated dynamically */
3087static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3088 int num_desc)
3089{
3090 uint8_t i;
3091
3092 for (i = 0; i < num_pool; i++) {
3093 qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3094 soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3095 }
3096
3097 return 0;
3098}
3099
3100static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3101{
3102 uint8_t i;
3103
3104 for (i = 0; i < num_pool; i++)
3105 qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3106}
Ruchi, Agrawale8eeb442018-02-12 16:19:58 +05303107#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3108static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3109 int num_desc)
3110{
3111 uint8_t i;
3112
3113 /* Allocate software Tx descriptor pools */
3114 for (i = 0; i < num_pool; i++) {
3115 if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3116 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3117 "%s Tx Desc Pool alloc %d failed %pK\n",
3118 __func__, i, soc);
3119 return ENOMEM;
3120 }
3121 }
3122 return 0;
3123}
3124
3125static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3126{
3127 uint8_t i;
3128
3129 for (i = 0; i < num_pool; i++) {
Soumya Bhat33a88222018-03-21 14:47:00 +05303130 qdf_assert_always(!soc->tx_desc[i].num_allocated);
Ruchi, Agrawale8eeb442018-02-12 16:19:58 +05303131 if (dp_tx_desc_pool_free(soc, i)) {
3132 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3133 "%s Tx Desc Pool Free failed\n", __func__);
3134 }
3135 }
3136}
3137
Ruchi, Agrawale8eeb442018-02-12 16:19:58 +05303138#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
Ruchi, Agrawalc0f9c972018-02-02 11:24:05 +05303139
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303140/**
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303141 * dp_tx_soc_detach() - detach soc from dp tx
3142 * @soc: core txrx main context
3143 *
3144 * This function will detach dp tx into main device context
3145 * will free dp tx resource and initialize resources
3146 *
3147 * Return: QDF_STATUS_SUCCESS: success
3148 * QDF_STATUS_E_RESOURCES: Error return
3149 */
3150QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3151{
3152 uint8_t num_pool;
Leo Chang5ea93a42016-11-03 12:39:49 -07003153 uint16_t num_desc;
3154 uint16_t num_ext_desc;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303155 uint8_t i;
3156
3157 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3158 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3159 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3160
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003161 dp_tx_flow_control_deinit(soc);
3162 dp_tx_delete_static_pools(soc, num_pool);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303163
3164 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3165 "%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
3166 __func__, num_pool, num_desc);
3167
3168 for (i = 0; i < num_pool; i++) {
3169 if (dp_tx_ext_desc_pool_free(soc, i)) {
3170 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3171 "%s Tx Ext Desc Pool Free failed\n",
3172 __func__);
3173 return QDF_STATUS_E_RESOURCES;
3174 }
3175 }
3176
3177 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Leo Chang5ea93a42016-11-03 12:39:49 -07003178 "%s MSDU Ext Desc Pool %d Free descs = %d\n",
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303179 __func__, num_pool, num_ext_desc);
3180
Ishank Jain5122f8f2017-03-15 22:22:47 +05303181 for (i = 0; i < num_pool; i++) {
3182 dp_tx_tso_desc_pool_free(soc, i);
3183 }
3184
3185 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3186 "%s TSO Desc Pool %d Free descs = %d\n",
3187 __func__, num_pool, num_desc);
3188
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -07003189
3190 for (i = 0; i < num_pool; i++)
3191 dp_tx_tso_num_seg_pool_free(soc, i);
3192
3193
3194 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3195 "%s TSO Num of seg Desc Pool %d Free descs = %d\n",
3196 __func__, num_pool, num_desc);
3197
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303198 return QDF_STATUS_SUCCESS;
3199}
3200
3201/**
3202 * dp_tx_soc_attach() - attach soc to dp tx
3203 * @soc: core txrx main context
3204 *
3205 * This function will attach dp tx into main device context
3206 * will allocate dp tx resource and initialize resources
3207 *
3208 * Return: QDF_STATUS_SUCCESS: success
3209 * QDF_STATUS_E_RESOURCES: Error return
3210 */
3211QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3212{
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003213 uint8_t i;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303214 uint8_t num_pool;
3215 uint32_t num_desc;
3216 uint32_t num_ext_desc;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303217
3218 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3219 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3220 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3221
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003222 if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3223 goto fail;
3224
3225 dp_tx_flow_control_init(soc);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303226
3227 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3228 "%s Tx Desc Alloc num_pool = %d, descs = %d\n",
3229 __func__, num_pool, num_desc);
3230
3231 /* Allocate extension tx descriptor pools */
3232 for (i = 0; i < num_pool; i++) {
3233 if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3234 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003235 "MSDU Ext Desc Pool alloc %d failed %pK\n",
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303236 i, soc);
3237
3238 goto fail;
3239 }
3240 }
3241
3242 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Leo Chang5ea93a42016-11-03 12:39:49 -07003243 "%s MSDU Ext Desc Alloc %d, descs = %d\n",
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303244 __func__, num_pool, num_ext_desc);
3245
Ishank Jain5122f8f2017-03-15 22:22:47 +05303246 for (i = 0; i < num_pool; i++) {
3247 if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3248 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003249 "TSO Desc Pool alloc %d failed %pK\n",
Ishank Jain5122f8f2017-03-15 22:22:47 +05303250 i, soc);
3251
3252 goto fail;
3253 }
3254 }
3255
3256 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3257 "%s TSO Desc Alloc %d, descs = %d\n",
3258 __func__, num_pool, num_desc);
3259
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -07003260 for (i = 0; i < num_pool; i++) {
3261 if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3262 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003263 "TSO Num of seg Pool alloc %d failed %pK\n",
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -07003264 i, soc);
3265
3266 goto fail;
3267 }
3268 }
3269
3270 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3271 "%s TSO Num of seg pool Alloc %d, descs = %d\n",
3272 __func__, num_pool, num_desc);
3273
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303274 /* Initialize descriptors in TCL Rings */
3275 if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3276 for (i = 0; i < soc->num_tcl_data_rings; i++) {
3277 hal_tx_init_data_ring(soc->hal_soc,
3278 soc->tcl_data_ring[i].hal_srng);
3279 }
3280 }
3281
Vijay Pamidipati5bcfa312016-12-20 11:44:38 +05303282 /*
Vijay Pamidipati5bcfa312016-12-20 11:44:38 +05303283 * todo - Add a runtime config option to enable this.
3284 */
Debashis Duttaf645222017-01-20 19:29:25 -08003285 /*
3286 * Due to multiple issues on NPR EMU, enable it selectively
3287 * only for NPR EMU, should be removed, once NPR platforms
3288 * are stable.
3289 */
chenguo9bece1a2017-12-19 18:49:41 +08003290 soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
Vijay Pamidipati5bcfa312016-12-20 11:44:38 +05303291
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303292 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3293 "%s HAL Tx init Success\n", __func__);
3294
3295 return QDF_STATUS_SUCCESS;
3296
3297fail:
3298 /* Detach will take care of freeing only allocated resources */
3299 dp_tx_soc_detach(soc);
3300 return QDF_STATUS_E_RESOURCES;
3301}
Ishank Jainc838b132017-02-17 11:08:18 +05303302
3303/*
3304 * dp_tx_me_mem_free(): Function to free allocated memory in mcast enahncement
3305 * pdev: pointer to DP PDEV structure
3306 * seg_info_head: Pointer to the head of list
3307 *
3308 * return: void
3309 */
Pamidipati, Vijay726ea122018-02-07 18:27:00 +05303310static void dp_tx_me_mem_free(struct dp_pdev *pdev,
Ishank Jainc838b132017-02-17 11:08:18 +05303311 struct dp_tx_seg_info_s *seg_info_head)
3312{
3313 struct dp_tx_me_buf_t *mc_uc_buf;
3314 struct dp_tx_seg_info_s *seg_info_new = NULL;
3315 qdf_nbuf_t nbuf = NULL;
3316 uint64_t phy_addr;
3317
3318 while (seg_info_head) {
3319 nbuf = seg_info_head->nbuf;
3320 mc_uc_buf = (struct dp_tx_me_buf_t *)
Pamidipati, Vijayfb0d54d2018-01-03 20:06:08 +05303321 seg_info_head->frags[0].vaddr;
Ishank Jainc838b132017-02-17 11:08:18 +05303322 phy_addr = seg_info_head->frags[0].paddr_hi;
3323 phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3324 qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3325 phy_addr,
3326 QDF_DMA_TO_DEVICE , DP_MAC_ADDR_LEN);
3327 dp_tx_me_free_buf(pdev, mc_uc_buf);
3328 qdf_nbuf_free(nbuf);
3329 seg_info_new = seg_info_head;
3330 seg_info_head = seg_info_head->next;
3331 qdf_mem_free(seg_info_new);
3332 }
3333}
3334
3335/**
3336 * dp_tx_me_send_convert_ucast(): fuction to convert multicast to unicast
3337 * @vdev: DP VDEV handle
3338 * @nbuf: Multicast nbuf
3339 * @newmac: Table of the clients to which packets have to be sent
3340 * @new_mac_cnt: No of clients
3341 *
3342 * return: no of converted packets
3343 */
3344uint16_t
3345dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3346 uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3347{
3348 struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3349 struct dp_pdev *pdev = vdev->pdev;
3350 struct ether_header *eh;
3351 uint8_t *data;
3352 uint16_t len;
3353
3354 /* reference to frame dst addr */
3355 uint8_t *dstmac;
3356 /* copy of original frame src addr */
3357 uint8_t srcmac[DP_MAC_ADDR_LEN];
3358
3359 /* local index into newmac */
3360 uint8_t new_mac_idx = 0;
3361 struct dp_tx_me_buf_t *mc_uc_buf;
3362 qdf_nbuf_t nbuf_clone;
3363 struct dp_tx_msdu_info_s msdu_info;
3364 struct dp_tx_seg_info_s *seg_info_head = NULL;
3365 struct dp_tx_seg_info_s *seg_info_tail = NULL;
3366 struct dp_tx_seg_info_s *seg_info_new;
3367 struct dp_tx_frag_info_s data_frag;
3368 qdf_dma_addr_t paddr_data;
3369 qdf_dma_addr_t paddr_mcbuf = 0;
3370 uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3371 QDF_STATUS status;
3372
Kiran Venkatappaa7b68422017-07-14 20:53:37 +05303373 qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3374
Ishank Jainc838b132017-02-17 11:08:18 +05303375 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3376
3377 eh = (struct ether_header *) nbuf;
3378 qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3379
3380 len = qdf_nbuf_len(nbuf);
3381
3382 data = qdf_nbuf_data(nbuf);
3383
3384 status = qdf_nbuf_map(vdev->osdev, nbuf,
3385 QDF_DMA_TO_DEVICE);
3386
3387 if (status) {
3388 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3389 "Mapping failure Error:%d", status);
3390 DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
Pamidipati, Vijay726ea122018-02-07 18:27:00 +05303391 qdf_nbuf_free(nbuf);
3392 return 1;
Ishank Jainc838b132017-02-17 11:08:18 +05303393 }
3394
3395 paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3396
3397 /*preparing data fragment*/
3398 data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3399 data_frag.paddr_lo = (uint32_t)paddr_data;
Pamidipati, Vijayda917d52017-07-18 20:13:22 +05303400 data_frag.paddr_hi = (((uint64_t) paddr_data) >> 32);
Ishank Jainc838b132017-02-17 11:08:18 +05303401 data_frag.len = len - DP_MAC_ADDR_LEN;
3402
3403 for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3404 dstmac = newmac[new_mac_idx];
3405 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3406 "added mac addr (%pM)", dstmac);
3407
3408 /* Check for NULL Mac Address */
3409 if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3410 continue;
3411
3412 /* frame to self mac. skip */
3413 if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3414 continue;
3415
3416 /*
3417 * TODO: optimize to avoid malloc in per-packet path
3418 * For eg. seg_pool can be made part of vdev structure
3419 */
3420 seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3421
3422 if (!seg_info_new) {
3423 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3424 "alloc failed");
3425 DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3426 goto fail_seg_alloc;
3427 }
3428
3429 mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3430 if (mc_uc_buf == NULL)
3431 goto fail_buf_alloc;
3432
3433 /*
3434 * TODO: Check if we need to clone the nbuf
3435 * Or can we just use the reference for all cases
3436 */
3437 if (new_mac_idx < (new_mac_cnt - 1)) {
3438 nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3439 if (nbuf_clone == NULL) {
3440 DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3441 goto fail_clone;
3442 }
3443 } else {
3444 /*
3445 * Update the ref
3446 * to account for frame sent without cloning
3447 */
3448 qdf_nbuf_ref(nbuf);
3449 nbuf_clone = nbuf;
3450 }
3451
3452 qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3453
3454 status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3455 QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3456 &paddr_mcbuf);
3457
3458 if (status) {
3459 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3460 "Mapping failure Error:%d", status);
3461 DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3462 goto fail_map;
3463 }
3464
3465 seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
3466 seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3467 seg_info_new->frags[0].paddr_hi =
Pamidipati, Vijayda917d52017-07-18 20:13:22 +05303468 ((uint64_t) paddr_mcbuf >> 32);
Ishank Jainc838b132017-02-17 11:08:18 +05303469 seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3470
3471 seg_info_new->frags[1] = data_frag;
3472 seg_info_new->nbuf = nbuf_clone;
3473 seg_info_new->frag_cnt = 2;
3474 seg_info_new->total_len = len;
3475
3476 seg_info_new->next = NULL;
3477
3478 if (seg_info_head == NULL)
3479 seg_info_head = seg_info_new;
3480 else
3481 seg_info_tail->next = seg_info_new;
3482
3483 seg_info_tail = seg_info_new;
3484 }
3485
Pamidipati, Vijay726ea122018-02-07 18:27:00 +05303486 if (!seg_info_head) {
3487 goto free_return;
3488 }
Ishank Jainc838b132017-02-17 11:08:18 +05303489
3490 msdu_info.u.sg_info.curr_seg = seg_info_head;
3491 msdu_info.num_seg = new_mac_cnt;
3492 msdu_info.frm_type = dp_tx_frm_me;
3493
3494 DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3495 dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3496
3497 while (seg_info_head->next) {
3498 seg_info_new = seg_info_head;
3499 seg_info_head = seg_info_head->next;
3500 qdf_mem_free(seg_info_new);
3501 }
3502 qdf_mem_free(seg_info_head);
3503
Pamidipati, Vijay726ea122018-02-07 18:27:00 +05303504 qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3505 qdf_nbuf_free(nbuf);
Ishank Jainc838b132017-02-17 11:08:18 +05303506 return new_mac_cnt;
3507
3508fail_map:
3509 qdf_nbuf_free(nbuf_clone);
3510
3511fail_clone:
3512 dp_tx_me_free_buf(pdev, mc_uc_buf);
3513
3514fail_buf_alloc:
3515 qdf_mem_free(seg_info_new);
3516
3517fail_seg_alloc:
3518 dp_tx_me_mem_free(pdev, seg_info_head);
Pamidipati, Vijay726ea122018-02-07 18:27:00 +05303519
3520free_return:
Ishank Jainc838b132017-02-17 11:08:18 +05303521 qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
Pamidipati, Vijay726ea122018-02-07 18:27:00 +05303522 qdf_nbuf_free(nbuf);
3523 return 1;
Ishank Jainc838b132017-02-17 11:08:18 +05303524}
Ruchi, Agrawalc0f9c972018-02-02 11:24:05 +05303525