/*
 * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_net_types.h>      /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_util.h>           /* qdf_assert */
#include <qdf_lock.h>           /* qdf_spinlock */
#ifdef QCA_COMPUTE_TX_DELAY
#include <qdf_time.h>           /* qdf_system_ticks */
#endif

#include <ol_htt_tx_api.h>      /* htt_tx_desc_id */

#include <ol_tx_desc.h>
#include <ol_txrx_internal.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_txrx.h>

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
extern uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
#endif

49#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					    struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->pkt_type != 0xff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
			   __func__, tx_desc->pkt_type, pdev);
		qdf_assert(0);
	}
	if ((uint32_t *) tx_desc->htt_tx_desc < g_dbg_htt_desc_start_addr ||
	    (uint32_t *) tx_desc->htt_tx_desc > g_dbg_htt_desc_end_addr) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s Potential htt_desc corruption:0x%p pdev:0x%p\n",
			   __func__, tx_desc->htt_tx_desc, pdev);
		qdf_assert(0);
	}
}
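
/**
 * ol_tx_desc_reset_pkt_type() - mark a tx descriptor as free
 * @tx_desc: tx descriptor
 *
 * Sets pkt_type back to the freelist marker (0xff) so that reuse of a
 * stale descriptor can be caught by the sanity checks.
 *
 * Return: None
 */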
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->pkt_type = 0xff;
}
#ifdef QCA_COMPUTE_TX_DELAY
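/**
 * ol_tx_desc_compute_delay() - record the descriptor allocation timestamp
 * @tx_desc: tx descriptor
 *
 * Stores the current system tick count in the descriptor so the tx delay
 * can be computed at completion time; asserts if the previous timestamp
 * was never cleared.
 *
 * Return: None
 */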
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
			   __func__, tx_desc->entry_timestamp_ticks);
		qdf_assert(0);
	}
	tx_desc->entry_timestamp_ticks = qdf_system_ticks();
}
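
/**
 * ol_tx_desc_reset_timestamp() - clear the descriptor allocation timestamp
 * @tx_desc: tx descriptor
 *
 * Return: None
 */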
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->entry_timestamp_ticks = 0xffffffff;
}
#endif
#else
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					    struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif

#ifdef CONFIG_HL_SUPPORT

/**
 * ol_tx_desc_vdev_update() - store the vdev handle in the tx descriptor
 * @tx_desc: tx descriptor pointer
 * @vdev: vdev handle
 *
 * Return: None
 */
static inline void
ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
		       struct ol_txrx_vdev_t *vdev)
{
	tx_desc->vdev = vdev;
}
#else

static inline void
ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
		       struct ol_txrx_vdev_t *vdev)
{
	return;
}
#endif

#ifdef CONFIG_PER_VDEV_TX_DESC_POOL

/**
 * ol_tx_desc_count_inc() - tx desc count increment for desc allocation.
 * @vdev: vdev handle
 *
 * Return: None
 */
static inline void
ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
{
	qdf_atomic_inc(&vdev->tx_desc_count);
}
#else

static inline void
ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
{
	return;
}

#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * ol_tx_desc_alloc() - allocate descriptor from freelist
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: tx descriptor pointer/ NULL in case of error
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.freelist) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		ol_tx_desc_dup_detect_set(pdev, tx_desc);
		ol_tx_desc_sanity_checks(pdev, tx_desc);
		ol_tx_desc_compute_delay(tx_desc);
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	if (!tx_desc)
		return NULL;

	ol_tx_desc_vdev_update(tx_desc, vdev);
	ol_tx_desc_count_inc(vdev);
	qdf_atomic_inc(&tx_desc->ref_cnt);

	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info
 *
 * Return: tx descriptor or NULL
 */
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev);
}

#else
/**
 * ol_tx_desc_alloc() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @pool: flow pool
 *
 * Return: tx descriptor or NULL
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev,
				      struct ol_tx_flow_pool_t *pool)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = ol_tx_get_desc_flow_pool(pool);
			ol_tx_desc_dup_detect_set(pdev, tx_desc);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				pdev->pause_cb(vdev->vdev_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
			ol_tx_desc_sanity_checks(pdev, tx_desc);
			ol_tx_desc_compute_delay(tx_desc);
			qdf_atomic_inc(&tx_desc->ref_cnt);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		pdev->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info
 *
 * Return: tx descriptor or NULL
 */
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	if (qdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
		return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
	else
		return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#else
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#endif
#endif

/**
 * ol_tx_desc_alloc_hl() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: tx msdu info
 *
 * Return: tx descriptor pointer/ NULL in case of error
 */
static struct ol_tx_desc_t *
ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	qdf_atomic_dec(&pdev->tx_queue.rsrc_cnt);

	return tx_desc;
}

#if defined(CONFIG_PER_VDEV_TX_DESC_POOL) && defined(CONFIG_HL_SUPPORT)

/**
 * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline void
ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
{
	qdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
	tx_desc->vdev = NULL;
}
#else

static inline void
ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_free() - put descriptor to freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);

	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
		if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
			qdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
			qdf_assert(0);
		} else {
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
		}
	}
	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	ol_tx_put_desc_global_pool(pdev, tx_desc);
	ol_tx_desc_vdev_rm(tx_desc);

	qdf_spin_unlock_bh(&pdev->tx_mutex);
}

#else
/**
 * ol_tx_desc_free() - put descriptor to pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
		if (qdf_unlikely(tx_desc->tso_desc == NULL))
			qdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
	ol_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			pdev->pause_cb(pool->member_flow_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->flow_pool_size) {
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			ol_tx_free_invalid_flow_pool(pool);
			qdf_print("%s %d pool is INVALID State!!\n",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!\n",
			  __func__, __LINE__);
		break;
	}
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#endif

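/**
 * dump_pkt() - print the contents of a network buffer
 * @nbuf: network buffer to dump
 * @nbuf_paddr: physical address of the buffer
 * @len: number of bytes to dump
 *
 * Return: None
 */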
void
dump_pkt(qdf_nbuf_t nbuf, qdf_dma_addr_t nbuf_paddr, int len)
{
	qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
		  qdf_nbuf_data(nbuf), (long long unsigned int)nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       qdf_nbuf_data(nbuf), len, true);
}

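/* map HTT packet types to the corresponding tx (CE) packet types */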
const uint32_t htt_to_ce_pkt_type[] = {
	[htt_pkt_type_raw] = tx_pkt_type_raw,
	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
	[htt_pkt_num_types] = 0xffffffff
};

#define WISA_DEST_PORT_6MBPS	50000
#define WISA_DEST_PORT_24MBPS	50001

/**
 * ol_tx_get_wisa_ext_hdr_type() - get header type for WiSA mode
 * @netbuf: network buffer
 *
 * Return: extension header type
 */
enum extension_header_type
ol_tx_get_wisa_ext_hdr_type(qdf_nbuf_t netbuf)
{
	uint8_t *buf = qdf_nbuf_data(netbuf);
	uint16_t dport;

	if (qdf_is_macaddr_group(
		(struct qdf_mac_addr *)(buf + QDF_NBUF_DEST_MAC_OFFSET))) {

		dport = (uint16_t)(*(uint16_t *)(buf +
			QDF_NBUF_TRAC_IPV4_OFFSET +
			QDF_NBUF_TRAC_IPV4_HEADER_SIZE + sizeof(uint16_t)));

		if (dport == QDF_SWAP_U16(WISA_DEST_PORT_6MBPS))
			return WISA_MODE_EXT_HEADER_6MBPS;
		else if (dport == QDF_SWAP_U16(WISA_DEST_PORT_24MBPS))
			return WISA_MODE_EXT_HEADER_24MBPS;
		else
			return EXT_HEADER_NOT_PRESENT;
	} else {
		return EXT_HEADER_NOT_PRESENT;
	}
}

/**
 * ol_tx_get_ext_header_type() - check whether an extension header is required
 * @vdev: vdev pointer
 * @netbuf: network buffer
 *
 * This function returns the extension header type; if no extension
 * header is required it returns EXT_HEADER_NOT_PRESENT.
 *
 * Return: extension header type
 */
enum extension_header_type
ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev,
			  qdf_nbuf_t netbuf)
{
	if (vdev->is_wisa_mode_enable == true)
		return ol_tx_get_wisa_ext_hdr_type(netbuf);
	else
		return EXT_HEADER_NOT_PRESENT;
}

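/**
 * ol_tx_desc_ll() - allocate and initialize a tx descriptor for a LL frame
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @netbuf: network buffer carrying the tx frame
 * @msdu_info: tx msdu info
 *
 * Fills in the HTT msdu info from the netbuf (vdev id, checksum offload,
 * encryption exemption), allocates a tx descriptor, and initializes the
 * HW (HTT) tx descriptor and its fragmentation descriptor, including the
 * TSO info when the frame is a TSO segment.
 *
 * Return: tx descriptor pointer, or NULL if no descriptor is available
 */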
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
				   struct ol_txrx_vdev_t *vdev,
				   qdf_nbuf_t netbuf,
				   struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	unsigned int i;
	uint32_t num_frags;
	enum extension_header_type type;

	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
	switch (qdf_nbuf_get_exemption_type(netbuf)) {
	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case QDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		qdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;

	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	type = ol_tx_get_ext_header_type(vdev, netbuf);

	/* initialize the HW tx descriptor */
	htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr,
			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
			 &msdu_info->tso_info, NULL, type);

	/*
	 * Initialize the fragmentation descriptor.
	 * Skip the prefix fragment (HTT tx descriptor) that was added
	 * during the call to htt_tx_desc_init above.
	 */
	num_frags = qdf_nbuf_get_num_frags(netbuf);
	/* num_frags is expected to be 2 at most */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */

	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
					  tx_desc->htt_frag_desc,
					  &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
					  msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;
#ifdef HELIUMPLUS_DEBUG
			void *frag_vaddr;

			frag_vaddr = qdf_nbuf_get_frag_vaddr(netbuf, i);
#endif
			frag_len = qdf_nbuf_get_frag_len(netbuf, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev,
					 tx_desc->htt_frag_desc, i - 1,
					 frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_vaddr=0x%p frag_paddr=0x%llx len=%zu\n",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i - 1, frag_vaddr, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev,
					 tx_desc->htt_tx_desc, i - 1,
					 frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

#if defined(HELIUMPLUS_DEBUG)
	ol_txrx_dump_frag_desc("ol_tx_desc_ll()", tx_desc);
#endif
	return tx_desc;
}

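/**
 * ol_tx_desc_hl() - allocate and initialize a tx descriptor for a HL frame
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @netbuf: network buffer carrying the tx frame
 * @msdu_info: tx msdu info
 *
 * Only the SW tx descriptor is initialized here; the HW (HTT) tx
 * descriptor is initialized later by the caller.
 *
 * Return: tx descriptor pointer, or NULL if no descriptor is available
 */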
struct ol_tx_desc_t *
ol_tx_desc_hl(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t netbuf,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	/* FIX THIS: these inits should probably be done by tx classify */
	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
	switch (qdf_nbuf_get_exemption_type(netbuf)) {
	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case QDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		qdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;
	/* fix this - get pkt_type from msdu_info */
	tx_desc->pkt_type = OL_TX_FRM_STD;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	tx_desc->orig_l2_hdr_bytes = 0;
#endif
	/* the HW tx descriptor will be initialized later by the caller */

	return tx_desc;
}

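/**
 * ol_tx_desc_frame_list_free() - free a list of tx descriptors and frames
 * @pdev: pdev handle
 * @tx_descs: list of tx descriptors to release
 * @had_error: whether the frames are being freed because of a tx error
 *
 * Clears each descriptor's reference count, unmaps the netbuf when this
 * is the last user, returns the descriptor to its pool, and finally frees
 * the netbufs as a batch.
 *
 * Return: None
 */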
void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
				ol_tx_desc_list *tx_descs, int had_error)
{
	struct ol_tx_desc_t *tx_desc, *tmp;
	qdf_nbuf_t msdus = NULL;

	TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
		qdf_nbuf_t msdu = tx_desc->netbuf;

		qdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
		/* restore original hdr offset */
		OL_TX_RESTORE_HDR(tx_desc, msdu);
#endif
		if (qdf_nbuf_get_users(msdu) <= 1)
			qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);

		/* free the tx desc */
		ol_tx_desc_free(pdev, tx_desc);
		/* link the netbuf into a list to free as a batch */
		qdf_nbuf_set_next(msdu, msdus);
		msdus = msdu;
	}
	/* free the netbufs as a batch */
	qdf_nbuf_tx_free(msdus, had_error);
}

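/**
 * ol_tx_desc_frame_free_nonstd() - free a non-standard tx frame
 * @pdev: pdev handle
 * @tx_desc: tx descriptor of the frame
 * @had_error: whether the frame is being freed because of a tx error
 *
 * "No free" frames are handed back through the registered tx_data
 * callback; management frames have the HTT fragments table pointer reset
 * and the OTA ack callback invoked before the netbuf is freed.
 *
 * Return: None
 */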
void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
				  struct ol_tx_desc_t *tx_desc, int had_error)
{
	int mgmt_type;
	ol_txrx_mgmt_tx_cb ota_ack_cb;

	qdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* restore original hdr offset */
	OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
#endif
	if (tx_desc->pkt_type == OL_TX_FRM_NO_FREE) {
		/* free the tx desc but don't unmap or free the frame */
		if (pdev->tx_data_callback.func) {
			qdf_nbuf_set_next(tx_desc->netbuf, NULL);
			pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
						    tx_desc->netbuf, had_error);
			ol_tx_desc_free(pdev, tx_desc);
			return;
		}
		/* let the code below unmap and free the frame */
	}
	qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
	/* check the frame type to see what kind of special steps are needed */
	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
	    (tx_desc->pkt_type != 0xff)) {
		qdf_dma_addr_t frag_desc_paddr = 0;

#if defined(HELIUMPLUS_PADDR64)
		frag_desc_paddr = tx_desc->htt_frag_desc_paddr;
		/* FIX THIS -
		 * The FW currently has trouble using the host's fragments
		 * table for management frames. Until this is fixed,
		 * rather than specifying the fragment table to the FW,
		 * the host SW will specify just the address of the initial
		 * fragment.
		 * Now that the mgmt frame is done, the HTT tx desc's frags
		 * table pointer needs to be reset.
		 */
#if defined(HELIUMPLUS_DEBUG)
		qdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
			  __func__, __LINE__, tx_desc->id,
			  frag_desc_paddr);
#endif /* HELIUMPLUS_DEBUG */
#endif /* HELIUMPLUS_PADDR64 */
		htt_tx_desc_frags_table_set(pdev->htt_pdev,
					    tx_desc->htt_tx_desc, 0,
					    frag_desc_paddr, 1);

		mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		/*
		 * we already checked the value when the mgmt frame was
		 * provided to the txrx layer.
		 * no need to check it a 2nd time.
		 */
		ota_ack_cb = pdev->tx_mgmt.callbacks[mgmt_type].ota_ack_cb;
		if (ota_ack_cb) {
			void *ctxt;

			ctxt = pdev->tx_mgmt.callbacks[mgmt_type].ctxt;
			ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
		}
		/* free the netbuf */
		qdf_nbuf_free(tx_desc->netbuf);
	} else {
		/* single regular frame */
		qdf_nbuf_set_next(tx_desc->netbuf, NULL);
		qdf_nbuf_tx_free(tx_desc->netbuf, had_error);
	}
	/* free the tx desc */
	ol_tx_desc_free(pdev, tx_desc);
}

#if defined(FEATURE_TSO)
/**
 * ol_tso_alloc_segment() - function to allocate a TSO segment
 * element
 * @pdev: HTT pdev
 *
 * Allocates a TSO segment element from the free list held in
 * the HTT pdev
 *
 * Return: pointer to the allocated TSO segment element, or NULL if
 *	   the free list is empty
 */
struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	if (pdev->tso_seg_pool.freelist) {
		pdev->tso_seg_pool.num_free--;
		tso_seg = pdev->tso_seg_pool.freelist;
		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
	}
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

	return tso_seg;
}

/**
 * ol_tso_free_segment() - function to free a TSO segment
 * element
 * @pdev: HTT pdev
 * @tso_seg: The TSO segment element to be freed
 *
 * Returns a TSO segment element to the free list held in the
 * HTT pdev
 *
 * Return: none
 */
void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
			 struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	tso_seg->next = pdev->tso_seg_pool.freelist;
	pdev->tso_seg_pool.freelist = tso_seg;
	pdev->tso_seg_pool.num_free++;
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
}
#endif