/*
 * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_net_types.h>      /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_util.h>           /* qdf_assert */
#include <qdf_lock.h>           /* qdf_spinlock */
#ifdef QCA_COMPUTE_TX_DELAY
#include <qdf_time.h>           /* qdf_system_ticks */
#endif

#include <ol_htt_tx_api.h>      /* htt_tx_desc_id */

#include <ol_tx_desc.h>
#include <ol_txrx_internal.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_txrx.h>

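/*
 * Descriptor sanity checking (QCA_SUPPORT_TXDESC_SANITY_CHECKS):
 * when a descriptor is freed its pkt_type is reset to ol_tx_frm_freed
 * (and, with QCA_COMPUTE_TX_DELAY, its entry timestamp to the 0xffffffff
 * sentinel).  These markers are verified at allocation time, so reuse of
 * a descriptor that was never freed, or that was corrupted, triggers
 * qdf_assert().  Without the feature the helpers below compile away to
 * empty inline stubs.
 */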
45#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080046static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
47 struct ol_tx_desc_t *tx_desc)
48{
gbiane55c9562016-11-01 14:47:47 +080049 if (tx_desc->pkt_type != ol_tx_frm_freed) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080050 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
51 "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
52 __func__, tx_desc->pkt_type, pdev);
Anurag Chouhanc5548422016-02-24 18:33:27 +053053 qdf_assert(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080054 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080055}
56static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
57{
gbiane55c9562016-11-01 14:47:47 +080058 tx_desc->pkt_type = ol_tx_frm_freed;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080059}
60#ifdef QCA_COMPUTE_TX_DELAY
61static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
62{
63 if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
64 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
65 __func__, tx_desc->entry_timestamp_ticks);
Anurag Chouhanc5548422016-02-24 18:33:27 +053066 qdf_assert(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080067 }
Anurag Chouhan50220ce2016-02-18 20:11:33 +053068 tx_desc->entry_timestamp_ticks = qdf_system_ticks();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080069}
70static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
71{
72 tx_desc->entry_timestamp_ticks = 0xffffffff;
73}
74#endif
75#else
76static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
77 struct ol_tx_desc_t *tx_desc)
78{
79 return;
80}
81static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
82{
83 return;
84}
85static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
86{
87 return;
88}
89static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
90{
91 return;
92}
93#endif
94
#ifdef CONFIG_HL_SUPPORT

/**
 * ol_tx_desc_vdev_update() - assign the vdev to the tx descriptor.
 * @tx_desc: tx descriptor pointer
 * @vdev: vdev handle
 *
 * Return: None
 */
static inline void
ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
		       struct ol_txrx_vdev_t *vdev)
{
	tx_desc->vdev = vdev;
}
#else

static inline void
ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
		       struct ol_txrx_vdev_t *vdev)
{
	return;
}
#endif

#ifdef CONFIG_PER_VDEV_TX_DESC_POOL

/**
 * ol_tx_desc_count_inc() - tx desc count increment for desc allocation.
 * @vdev: vdev handle
 *
 * Return: None
 */
static inline void
ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
{
	qdf_atomic_inc(&vdev->tx_desc_count);
}
#else

static inline void
ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
{
	return;
}

#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * ol_tx_desc_alloc() - allocate descriptor from freelist
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: tx descriptor pointer, or NULL on error
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.freelist) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		ol_tx_desc_dup_detect_set(pdev, tx_desc);
		ol_tx_desc_sanity_checks(pdev, tx_desc);
		ol_tx_desc_compute_delay(tx_desc);
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	if (!tx_desc)
		return NULL;

	ol_tx_desc_vdev_update(tx_desc, vdev);
	ol_tx_desc_count_inc(vdev);
	qdf_atomic_inc(&tx_desc->ref_cnt);

	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info handle
 *
 * Return: tx descriptor or NULL
 */
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev);
}

#else
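/*
 * With QCA_LL_TX_FLOW_CONTROL_V2, descriptors are drawn from per-flow
 * pools rather than the global freelist.  When a pool's avail_desc count
 * drops below its stop_th the pool is marked FLOW_POOL_ACTIVE_PAUSED and
 * the vdev's netif queues are stopped; ol_tx_desc_free() wakes them again
 * once avail_desc climbs back above start_th.
 */
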
/**
 * ol_tx_desc_alloc() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @pool: flow pool
 *
 * Return: tx descriptor or NULL
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev,
				      struct ol_tx_flow_pool_t *pool)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = ol_tx_get_desc_flow_pool(pool);
			ol_tx_desc_dup_detect_set(pdev, tx_desc);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				pdev->pause_cb(vdev->vdev_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
			ol_tx_desc_sanity_checks(pdev, tx_desc);
			ol_tx_desc_compute_delay(tx_desc);
			qdf_atomic_inc(&tx_desc->ref_cnt);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		pdev->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info handle
 *
 * Return: tx descriptor or NULL
 */
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	if (qdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
		return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
	else
		return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#else
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#endif
#endif

/**
 * ol_tx_desc_alloc_hl() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: tx msdu info
 *
 * Return: tx descriptor pointer, or NULL on error
 */
static struct ol_tx_desc_t *
ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	qdf_atomic_dec(&pdev->tx_queue.rsrc_cnt);

	return tx_desc;
}

#if defined(CONFIG_PER_VDEV_TX_DESC_POOL) && defined(CONFIG_HL_SUPPORT)

/**
 * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline void
ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
{
	qdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
	tx_desc->vdev = NULL;
}
#else

static inline void
ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_free() - put descriptor to freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);

	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
		if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
			qdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
			qdf_assert(0);
		} else {
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
		}
	}
	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	ol_tx_put_desc_global_pool(pdev, tx_desc);
	ol_tx_desc_vdev_rm(tx_desc);

	qdf_spin_unlock_bh(&pdev->tx_mutex);
}

#else
/**
 * ol_tx_desc_free() - put descriptor to pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
		if (qdf_unlikely(tx_desc->tso_desc == NULL))
			qdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
	ol_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			pdev->pause_cb(pool->member_flow_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->flow_pool_size) {
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			ol_tx_free_invalid_flow_pool(pool);
			qdf_print("%s %d pool is INVALID State!!\n",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!\n",
			  __func__, __LINE__);
		break;
	}
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#endif

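/**
 * dump_pkt() - print a packet's virtual/physical address and a hex dump
 * @nbuf: network buffer holding the packet
 * @nbuf_paddr: physical (DMA) address of the buffer
 * @len: number of bytes to dump
 *
 * Return: None
 */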
void
dump_pkt(qdf_nbuf_t nbuf, qdf_dma_addr_t nbuf_paddr, int len)
{
	qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
		  qdf_nbuf_data(nbuf), (long long unsigned int)nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_ADDRESS, 16, 4,
		       qdf_nbuf_data(nbuf), len, true);
}

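/* Translation table from htt_pkt_type_* values to the tx_pkt_type_* values */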
const uint32_t htt_to_ce_pkt_type[] = {
	[htt_pkt_type_raw] = tx_pkt_type_raw,
	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
	[htt_pkt_num_types] = 0xffffffff
};

#define WISA_DEST_PORT_6MBPS	50000
#define WISA_DEST_PORT_24MBPS	50001

/**
 * ol_tx_get_wisa_ext_hdr_type() - get header type for WiSA mode
 * @netbuf: network buffer
 *
 * Return: extension header type
 */
enum extension_header_type
ol_tx_get_wisa_ext_hdr_type(qdf_nbuf_t netbuf)
{
	uint8_t *buf = qdf_nbuf_data(netbuf);
	uint16_t dport;

	if (qdf_is_macaddr_group(
		(struct qdf_mac_addr *)(buf + QDF_NBUF_DEST_MAC_OFFSET))) {

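		/*
		 * Read the UDP destination port: step to the IPv4 payload
		 * (QDF_NBUF_TRAC_IPV4_OFFSET + fixed IPv4 header size) and
		 * skip over the 2-byte source port.
		 */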
		dport = (uint16_t)(*(uint16_t *)(buf +
			QDF_NBUF_TRAC_IPV4_OFFSET +
			QDF_NBUF_TRAC_IPV4_HEADER_SIZE + sizeof(uint16_t)));

		if (dport == QDF_SWAP_U16(WISA_DEST_PORT_6MBPS))
			return WISA_MODE_EXT_HEADER_6MBPS;
		else if (dport == QDF_SWAP_U16(WISA_DEST_PORT_24MBPS))
			return WISA_MODE_EXT_HEADER_24MBPS;
		else
			return EXT_HEADER_NOT_PRESENT;
	} else {
		return EXT_HEADER_NOT_PRESENT;
	}
}

/**
 * ol_tx_get_ext_header_type() - check whether an extension header is required
 * @vdev: vdev pointer
 * @netbuf: network buffer
 *
 * This function returns the header type; if no extension header is
 * required it returns EXT_HEADER_NOT_PRESENT.
 *
 * Return: extension header type
 */
enum extension_header_type
ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev,
			  qdf_nbuf_t netbuf)
{
	if (vdev->is_wisa_mode_enable == true)
		return ol_tx_get_wisa_ext_hdr_type(netbuf);
	else
		return EXT_HEADER_NOT_PRESENT;
}

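/**
 * ol_tx_desc_ll() - allocate and set up a tx descriptor for a LL frame
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @netbuf: network buffer carrying the frame
 * @msdu_info: tx msdu info (HTT parameters and TSO state)
 *
 * Allocates a SW tx descriptor, fills in the encryption-exemption and
 * checksum-offload fields of @msdu_info, initializes the HTT tx
 * descriptor and its fragmentation descriptor, and, for TSO frames,
 * fills the TSO info into the fragment descriptor.
 *
 * Return: tx descriptor pointer, or NULL if no descriptor is available
 */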
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
				   struct ol_txrx_vdev_t *vdev,
				   qdf_nbuf_t netbuf,
				   struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	unsigned int i;
	uint32_t num_frags;
	enum extension_header_type type;

	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
	switch (qdf_nbuf_get_exemption_type(netbuf)) {
	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case QDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		qdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;

	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	type = ol_tx_get_ext_header_type(vdev, netbuf);

	/* initialize the HW tx descriptor */
	htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr,
			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
			 &msdu_info->tso_info, NULL, type);

	/*
	 * Initialize the fragmentation descriptor.
	 * Skip the prefix fragment (HTT tx descriptor) that was added
	 * during the call to htt_tx_desc_init above.
	 */
	num_frags = qdf_nbuf_get_num_frags(netbuf);
	/* num_frags is expected to be 2 at most */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */

	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;
#ifdef HELIUMPLUS_DEBUG
			void *frag_vaddr;
			frag_vaddr = qdf_nbuf_get_frag_vaddr(netbuf, i);
#endif
			frag_len = qdf_nbuf_get_frag_len(netbuf, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
				 frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_vaddr=0x%p frag_paddr=0x%llx len=%zu\n",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i-1, frag_vaddr, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc, i - 1,
				 frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

#if defined(HELIUMPLUS_DEBUG)
	ol_txrx_dump_frag_desc("ol_tx_desc_ll()", tx_desc);
#endif
	return tx_desc;
}

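/**
 * ol_tx_desc_hl() - allocate and set up a tx descriptor for a HL frame
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @netbuf: network buffer carrying the frame
 * @msdu_info: tx msdu info
 *
 * Allocates a SW tx descriptor and fills in the encryption-exemption and
 * checksum-offload fields of @msdu_info; the HW tx descriptor is left for
 * the caller to initialize.
 *
 * Return: tx descriptor pointer, or NULL if no descriptor is available
 */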
struct ol_tx_desc_t *
ol_tx_desc_hl(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t netbuf,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	/* FIX THIS: these inits should probably be done by tx classify */
	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
	switch (qdf_nbuf_get_exemption_type(netbuf)) {
	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case QDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		qdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;
	/* fix this - get pkt_type from msdu_info */
	tx_desc->pkt_type = OL_TX_FRM_STD;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	tx_desc->orig_l2_hdr_bytes = 0;
#endif
	/* the HW tx descriptor will be initialized later by the caller */

	return tx_desc;
}

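/**
 * ol_tx_desc_frame_list_free() - free a list of tx descriptors and frames
 * @pdev: pdev handle
 * @tx_descs: list of tx descriptors to release
 * @had_error: non-zero if the frames are being freed due to a tx error
 *
 * Unmaps each netbuf (when no other user holds a reference), returns each
 * descriptor to its pool, and frees the netbufs as a single batch.
 *
 * Return: None
 */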
void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
				ol_tx_desc_list *tx_descs, int had_error)
{
	struct ol_tx_desc_t *tx_desc, *tmp;
	qdf_nbuf_t msdus = NULL;

	TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
		qdf_nbuf_t msdu = tx_desc->netbuf;

		qdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
		/* restore original hdr offset */
		OL_TX_RESTORE_HDR(tx_desc, msdu);
#endif
		if (qdf_nbuf_get_users(msdu) <= 1)
			qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);

		/* free the tx desc */
		ol_tx_desc_free(pdev, tx_desc);
		/* link the netbuf into a list to free as a batch */
		qdf_nbuf_set_next(msdu, msdus);
		msdus = msdu;
	}
	/* free the netbufs as a batch */
	qdf_nbuf_tx_free(msdus, had_error);
}

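/**
 * ol_tx_desc_frame_free_nonstd() - free a non-standard (e.g. mgmt) frame
 * @pdev: pdev handle
 * @tx_desc: tx descriptor of the frame
 * @had_error: non-zero if the frame is being freed due to a tx error
 *
 * Handles the special cases: frames the caller does not want freed are
 * handed back through the tx_data_callback, management frames have their
 * HTT fragment table pointer reset and the registered OTA ack callback
 * invoked, and regular frames are simply unmapped and freed.
 *
 * Return: None
 */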
void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
				  struct ol_tx_desc_t *tx_desc, int had_error)
{
	int mgmt_type;
	ol_txrx_mgmt_tx_cb ota_ack_cb;

	qdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* restore original hdr offset */
	OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
#endif
	if (tx_desc->pkt_type == OL_TX_FRM_NO_FREE) {
		/* free the tx desc but don't unmap or free the frame */
		if (pdev->tx_data_callback.func) {
			qdf_nbuf_set_next(tx_desc->netbuf, NULL);
			pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
						    tx_desc->netbuf, had_error);
			ol_tx_desc_free(pdev, tx_desc);
			return;
		}
		/* let the code below unmap and free the frame */
	}
	qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
	/* check the frame type to see what kind of special steps are needed */
	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
		   (tx_desc->pkt_type != ol_tx_frm_freed)) {
		qdf_dma_addr_t frag_desc_paddr = 0;

#if defined(HELIUMPLUS_PADDR64)
		frag_desc_paddr = tx_desc->htt_frag_desc_paddr;
		/* FIX THIS -
		 * The FW currently has trouble using the host's fragments
		 * table for management frames. Until this is fixed,
		 * rather than specifying the fragment table to the FW,
		 * the host SW will specify just the address of the initial
		 * fragment.
		 * Now that the mgmt frame is done, the HTT tx desc's frags
		 * table pointer needs to be reset.
		 */
#if defined(HELIUMPLUS_DEBUG)
		qdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
			  __func__, __LINE__, tx_desc->id,
			  frag_desc_paddr);
#endif /* HELIUMPLUS_DEBUG */
#endif /* HELIUMPLUS_PADDR64 */
		htt_tx_desc_frags_table_set(pdev->htt_pdev,
					    tx_desc->htt_tx_desc, 0,
					    frag_desc_paddr, 1);

		mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		/*
		 * we already checked the value when the mgmt frame was
		 * provided to the txrx layer.
		 * no need to check it a 2nd time.
		 */
		ota_ack_cb = pdev->tx_mgmt.callbacks[mgmt_type].ota_ack_cb;
		if (ota_ack_cb) {
			void *ctxt;
			ctxt = pdev->tx_mgmt.callbacks[mgmt_type].ctxt;
			ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
		}
		/* free the netbuf */
		qdf_nbuf_free(tx_desc->netbuf);
	} else {
		/* single regular frame */
		qdf_nbuf_set_next(tx_desc->netbuf, NULL);
		qdf_nbuf_tx_free(tx_desc->netbuf, had_error);
	}
	/* free the tx desc */
	ol_tx_desc_free(pdev, tx_desc);
}

#if defined(FEATURE_TSO)
/**
 * ol_tso_alloc_segment() - allocate a TSO segment element
 * @pdev: txrx pdev
 *
 * Allocates a TSO segment element from the free list held in
 * the txrx pdev.
 *
 * Return: pointer to the allocated TSO segment element, or NULL if the
 *	   free list is empty
 */
struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	if (pdev->tso_seg_pool.freelist) {
		pdev->tso_seg_pool.num_free--;
		tso_seg = pdev->tso_seg_pool.freelist;
		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
	}
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

	return tso_seg;
}

/**
 * ol_tso_free_segment() - free a TSO segment element
 * @pdev: txrx pdev
 * @tso_seg: the TSO segment element to be freed
 *
 * Returns a TSO segment element to the free list held in the
 * txrx pdev.
 *
 * Return: none
 */
void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
			 struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	tso_seg->next = pdev->tso_seg_pool.freelist;
	pdev->tso_seg_pool.freelist = tso_seg;
	pdev->tso_seg_pool.num_free++;
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
}
#endif