/*
 * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_net_types.h>      /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_util.h>           /* qdf_assert */
#include <qdf_lock.h>           /* qdf_spinlock */
#ifdef QCA_COMPUTE_TX_DELAY
#include <qdf_time.h>           /* qdf_system_ticks */
#endif

#include <ol_htt_tx_api.h>      /* htt_tx_desc_id */

#include <ol_tx_desc.h>
#include <ol_txrx_internal.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_txrx.h>

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
extern uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
#endif

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->pkt_type != 0xff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				"%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
				__func__, tx_desc->pkt_type, pdev);
		qdf_assert(0);
	}
	if ((uint32_t *) tx_desc->htt_tx_desc <
					g_dbg_htt_desc_start_addr
			|| (uint32_t *) tx_desc->htt_tx_desc >
					g_dbg_htt_desc_end_addr) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s Potential htt_desc corruption:0x%p pdev:0x%p\n",
			   __func__, tx_desc->htt_tx_desc, pdev);
		qdf_assert(0);
	}
}
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->pkt_type = 0xff;
}
#ifdef QCA_COMPUTE_TX_DELAY
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
				__func__, tx_desc->entry_timestamp_ticks);
		qdf_assert(0);
	}
	tx_desc->entry_timestamp_ticks = qdf_system_ticks();
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->entry_timestamp_ticks = 0xffffffff;
}
#endif
#else
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif
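/*
 * Note on the sanity helpers above: descriptors sitting on the freelist
 * carry sentinel values written at free time (pkt_type == 0xff and, with
 * QCA_COMPUTE_TX_DELAY, entry_timestamp_ticks == 0xffffffff), and the
 * checks assert at allocation time if a sentinel has been overwritten.
 * A hedged sketch of the intended pairing, using only helpers defined in
 * this file:
 *
 *	tx_desc = ol_tx_get_desc_global_pool(pdev);
 *	ol_tx_desc_sanity_checks(pdev, tx_desc);  // sentinels still intact?
 *	ol_tx_desc_compute_delay(tx_desc);        // stamp allocation time
 *	...
 *	ol_tx_desc_reset_pkt_type(tx_desc);       // re-arm sentinels at free
 *	ol_tx_desc_reset_timestamp(tx_desc);
 *	ol_tx_put_desc_global_pool(pdev, tx_desc);
 */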
/**
 * ol_tx_desc_vdev_update() - assign a vdev to the tx descriptor.
 * @tx_desc: tx descriptor pointer
 * @vdev: vdev handle
 *
 * Return: None
 */
static inline void
ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
		       struct ol_txrx_vdev_t *vdev)
{
	tx_desc->vdev = vdev;
}

#ifdef CONFIG_PER_VDEV_TX_DESC_POOL

/**
 * ol_tx_desc_count_inc() - tx desc count increment for desc allocation.
 * @vdev: vdev handle
 *
 * Return: None
 */
static inline void
ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
{
	qdf_atomic_inc(&vdev->tx_desc_count);
}
#else

static inline void
ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
{
	return;
}

#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * ol_tx_desc_alloc() - allocate descriptor from freelist
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: tx descriptor pointer/ NULL in case of error
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.freelist) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		ol_tx_desc_dup_detect_set(pdev, tx_desc);
		ol_tx_desc_sanity_checks(pdev, tx_desc);
		ol_tx_desc_compute_delay(tx_desc);
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	if (!tx_desc)
		return NULL;

	ol_tx_desc_vdev_update(tx_desc, vdev);
	ol_tx_desc_count_inc(vdev);
	qdf_atomic_inc(&tx_desc->ref_cnt);

	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info handle
 *
 * Return: tx descriptor or NULL
 */
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev);
}
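/*
 * Illustrative usage of the legacy global-pool path (a sketch, not taken
 * verbatim from any caller in this file): a descriptor obtained through
 * ol_tx_desc_alloc_wrapper() must eventually be returned with
 * ol_tx_desc_free(), defined later in this file.
 *
 *	struct ol_tx_desc_t *tx_desc;
 *
 *	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, &msdu_info);
 *	if (!tx_desc)
 *		return;			// global pool exhausted
 *	// ... fill in the HTT descriptor and hand the frame to the target
 *	ol_tx_desc_free(pdev, tx_desc);	// on completion/error
 */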
#else
/**
 * ol_tx_desc_alloc() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @pool: flow pool
 *
 * Return: tx descriptor or NULL
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev,
				      struct ol_tx_flow_pool_t *pool)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = ol_tx_get_desc_flow_pool(pool);
			ol_tx_desc_dup_detect_set(pdev, tx_desc);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				pdev->pause_cb(vdev->vdev_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
			ol_tx_desc_sanity_checks(pdev, tx_desc);
			ol_tx_desc_compute_delay(tx_desc);
			ol_tx_desc_vdev_update(tx_desc, vdev);
			qdf_atomic_inc(&tx_desc->ref_cnt);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		pdev->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info handle
 *
 * Return: tx descriptor or NULL
 */
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	if (qdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
		return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
	else
		return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#else
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#endif
#endif
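/*
 * Summary of the QCA_LL_TX_FLOW_CONTROL_V2 path above (descriptive only,
 * no behaviour beyond what the code implements): each allocation draws
 * from the vdev's flow pool and, once pool->avail_desc drops below
 * pool->stop_th, marks the pool FLOW_POOL_ACTIVE_PAUSED and pauses the
 * netif queues through pdev->pause_cb().  The matching ol_tx_desc_free()
 * further below resumes the queues once avail_desc climbs back above
 * pool->start_th.
 */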

/**
 * ol_tx_desc_alloc_hl() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: tx msdu info
 *
 * Return: tx descriptor pointer/ NULL in case of error
 */
static struct ol_tx_desc_t *
ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	qdf_atomic_dec(&pdev->tx_queue.rsrc_cnt);

	return tx_desc;
}

#if defined(CONFIG_PER_VDEV_TX_DESC_POOL) && defined(CONFIG_HL_SUPPORT)

/**
 * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline void
ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
{
	qdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
	tx_desc->vdev = NULL;
}
#else

static inline void
ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_free() - put descriptor to freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);

	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
		if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
			qdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
			qdf_assert(0);
		} else {
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
		}
	}
	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	ol_tx_put_desc_global_pool(pdev, tx_desc);
	ol_tx_desc_vdev_rm(tx_desc);

	qdf_spin_unlock_bh(&pdev->tx_mutex);
}

#else
/**
 * ol_tx_desc_free() - put descriptor to pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
		if (qdf_unlikely(tx_desc->tso_desc == NULL))
			qdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
	ol_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			pdev->pause_cb(pool->member_flow_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->flow_pool_size) {
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			ol_tx_free_invalid_flow_pool(pool);
			qdf_print("%s %d pool is INVALID State!!\n",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!\n",
			  __func__, __LINE__);
		break;
	};
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

}
#endif

extern void
dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);

void
dump_pkt(qdf_nbuf_t nbuf, qdf_dma_addr_t nbuf_paddr, int len)
{
	qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
		  qdf_nbuf_data(nbuf), (long long unsigned int)nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       qdf_nbuf_data(nbuf), len, true);
}

const uint32_t htt_to_ce_pkt_type[] = {
	[htt_pkt_type_raw] = tx_pkt_type_raw,
	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
	[htt_pkt_num_types] = 0xffffffff
};

#define WISA_DEST_PORT_6MBPS	50000
#define WISA_DEST_PORT_24MBPS	50001

/**
 * ol_tx_get_wisa_ext_hdr_type() - get header type for WiSA mode
 * @netbuf: network buffer
 *
 * Return: extension header type
 */
enum extension_header_type
ol_tx_get_wisa_ext_hdr_type(qdf_nbuf_t netbuf)
{
	uint8_t *buf = qdf_nbuf_data(netbuf);
	uint16_t dport;

	if (qdf_is_macaddr_group(
		(struct qdf_mac_addr *)(buf + QDF_NBUF_DEST_MAC_OFFSET))) {

		dport = (uint16_t)(*(uint16_t *)(buf +
			QDF_NBUF_TRAC_IPV4_OFFSET +
			QDF_NBUF_TRAC_IPV4_HEADER_SIZE + sizeof(uint16_t)));

		if (dport == QDF_SWAP_U16(WISA_DEST_PORT_6MBPS))
			return WISA_MODE_EXT_HEADER_6MBPS;
		else if (dport == QDF_SWAP_U16(WISA_DEST_PORT_24MBPS))
			return WISA_MODE_EXT_HEADER_24MBPS;
		else
			return EXT_HEADER_NOT_PRESENT;
	} else {
		return EXT_HEADER_NOT_PRESENT;
	}
}
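/*
 * Note (explanatory only): the destination-port read above assumes a
 * group-addressed IPv4/UDP frame with no IP options.
 * QDF_NBUF_TRAC_IPV4_OFFSET locates the start of the IPv4 header within
 * the frame, QDF_NBUF_TRAC_IPV4_HEADER_SIZE skips the fixed IPv4 header
 * to reach the UDP header, and the extra sizeof(uint16_t) steps over the
 * UDP source port, so dport holds the destination port in network byte
 * order; the well-known WiSA ports are byte-swapped for the comparison.
 */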

/**
 * ol_tx_get_ext_header_type() - check whether an extension header is required
 * @vdev: vdev pointer
 * @netbuf: network buffer
 *
 * This function returns the extension header type for the frame; when no
 * extension header is required it returns EXT_HEADER_NOT_PRESENT.
 *
 * Return: extension header type
 */
enum extension_header_type
ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t netbuf)
{
	if (vdev->is_wisa_mode_enable == true)
		return ol_tx_get_wisa_ext_hdr_type(netbuf);
	else
		return EXT_HEADER_NOT_PRESENT;
}

struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
				   struct ol_txrx_vdev_t *vdev,
				   qdf_nbuf_t netbuf,
				   struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	unsigned int i;
	uint32_t num_frags;
	enum extension_header_type type;

	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
	switch (qdf_nbuf_get_exemption_type(netbuf)) {
	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case QDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		qdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;

	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	type = ol_tx_get_ext_header_type(vdev, netbuf);

	/* initialize the HW tx descriptor */
	htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr,
			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
			 &msdu_info->tso_info, NULL, type);

	/*
	 * Initialize the fragmentation descriptor.
	 * Skip the prefix fragment (HTT tx descriptor) that was added
	 * during the call to htt_tx_desc_init above.
	 */
	num_frags = qdf_nbuf_get_num_frags(netbuf);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */

	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;
#ifdef HELIUMPLUS_DEBUG
			void *frag_vaddr;
			frag_vaddr = qdf_nbuf_get_frag_vaddr(netbuf, i);
#endif
			frag_len = qdf_nbuf_get_frag_len(netbuf, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
				 frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_vaddr=0x%p frag_paddr=0x%llx len=%zu\n",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i-1, frag_vaddr, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc, i - 1,
				 frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

#if defined(HELIUMPLUS_DEBUG)
	dump_frag_desc("ol_tx_desc_ll()", tx_desc);
#endif
	return tx_desc;
}
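/*
 * A hedged sketch of how a caller is expected to use ol_tx_desc_ll(); the
 * real LL send path also handles queuing and completions, which are
 * omitted here, and names other than the functions defined in this file
 * are illustrative:
 *
 *	struct ol_txrx_msdu_info_t msdu_info;
 *	struct ol_tx_desc_t *tx_desc;
 *
 *	msdu_info.tso_info.is_tso = 0;
 *	tx_desc = ol_tx_desc_ll(pdev, vdev, netbuf, &msdu_info);
 *	if (!tx_desc)
 *		return netbuf;	// pool empty: caller keeps the netbuf
 *	// download tx_desc->htt_tx_desc to the target; the completion path
 *	// eventually frees it via ol_tx_desc_frame_list_free()/ol_tx_desc_free()
 */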

struct ol_tx_desc_t *
ol_tx_desc_hl(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t netbuf,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	/* FIX THIS: these inits should probably be done by tx classify */
	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
	switch (qdf_nbuf_get_exemption_type(netbuf)) {
	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case QDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		qdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;
	/* fix this - get pkt_type from msdu_info */
	tx_desc->pkt_type = OL_TX_FRM_STD;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	tx_desc->orig_l2_hdr_bytes = 0;
#endif
	/* the HW tx descriptor will be initialized later by the caller */

	return tx_desc;
}

void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
				ol_tx_desc_list *tx_descs, int had_error)
{
	struct ol_tx_desc_t *tx_desc, *tmp;
	qdf_nbuf_t msdus = NULL;

	TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
		qdf_nbuf_t msdu = tx_desc->netbuf;

		qdf_atomic_init(&tx_desc->ref_cnt);	/* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
		/* restore original hdr offset */
		OL_TX_RESTORE_HDR(tx_desc, msdu);
#endif
		if (qdf_nbuf_get_users(msdu) <= 1)
			qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);

		/* free the tx desc */
		ol_tx_desc_free(pdev, tx_desc);
		/* link the netbuf into a list to free as a batch */
		qdf_nbuf_set_next(msdu, msdus);
		msdus = msdu;
	}
	/* free the netbufs as a batch */
	qdf_nbuf_tx_free(msdus, had_error);
}

void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
				  struct ol_tx_desc_t *tx_desc, int had_error)
{
	int mgmt_type;
	ol_txrx_mgmt_tx_cb ota_ack_cb;

	qdf_atomic_init(&tx_desc->ref_cnt);	/* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* restore original hdr offset */
	OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
#endif
	if (tx_desc->pkt_type == OL_TX_FRM_NO_FREE) {
		/* free the tx desc but don't unmap or free the frame */
		if (pdev->tx_data_callback.func) {
			qdf_nbuf_set_next(tx_desc->netbuf, NULL);
			pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
						    tx_desc->netbuf, had_error);
			ol_tx_desc_free(pdev, tx_desc);
			return;
		}
		/* let the code below unmap and free the frame */
	}
	qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
	/* check the frame type to see what kind of special steps are needed */
	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
		   (tx_desc->pkt_type != 0xff)) {
		qdf_dma_addr_t frag_desc_paddr = 0;

#if defined(HELIUMPLUS_PADDR64)
		frag_desc_paddr = tx_desc->htt_frag_desc_paddr;
		/* FIX THIS -
		 * The FW currently has trouble using the host's fragments
		 * table for management frames. Until this is fixed,
		 * rather than specifying the fragment table to the FW,
		 * the host SW will specify just the address of the initial
		 * fragment.
		 * Now that the mgmt frame is done, the HTT tx desc's frags
		 * table pointer needs to be reset.
		 */
#if defined(HELIUMPLUS_DEBUG)
		qdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
			  __func__, __LINE__, tx_desc->id,
			  frag_desc_paddr);
#endif /* HELIUMPLUS_DEBUG */
#endif /* HELIUMPLUS_PADDR64 */
		htt_tx_desc_frags_table_set(pdev->htt_pdev,
					    tx_desc->htt_tx_desc, 0,
					    frag_desc_paddr, 1);

		mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		/*
		 * we already checked the value when the mgmt frame was
		 * provided to the txrx layer.
		 * no need to check it a 2nd time.
		 */
		ota_ack_cb = pdev->tx_mgmt.callbacks[mgmt_type].ota_ack_cb;
		if (ota_ack_cb) {
			void *ctxt;
			ctxt = pdev->tx_mgmt.callbacks[mgmt_type].ctxt;
			ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
		}
		/* free the netbuf */
		qdf_nbuf_free(tx_desc->netbuf);
	} else {
		/* single regular frame */
		qdf_nbuf_set_next(tx_desc->netbuf, NULL);
		qdf_nbuf_tx_free(tx_desc->netbuf, had_error);
	}
	/* free the tx desc */
	ol_tx_desc_free(pdev, tx_desc);
}

#if defined(FEATURE_TSO)
/**
 * ol_tso_alloc_segment() - function to allocate a TSO segment
 * element
 * @pdev: HTT pdev
 *
 * Allocates a TSO segment element from the free list held in
 * the HTT pdev
 *
 * Return: allocated TSO segment element, or NULL if the pool is empty
 */
struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	if (pdev->tso_seg_pool.freelist) {
		pdev->tso_seg_pool.num_free--;
		tso_seg = pdev->tso_seg_pool.freelist;
		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
	}
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

	return tso_seg;
}

/**
 * ol_tso_free_segment() - function to free a TSO segment
 * element
 * @pdev: HTT pdev
 * @tso_seg: The TSO segment element to be freed
 *
 * Returns a TSO segment element to the free list held in the
 * HTT pdev
 *
 * Return: none
 */

void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
	 struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	tso_seg->next = pdev->tso_seg_pool.freelist;
	pdev->tso_seg_pool.freelist = tso_seg;
	pdev->tso_seg_pool.num_free++;
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
}
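/*
 * Hedged usage sketch for the TSO segment pool (illustrative only; the
 * actual callers live in the TSO send path): a segment is taken from the
 * pdev free list while a TSO frame is being segmented, attached to the
 * descriptor as tx_desc->tso_desc, and returned with
 * ol_tso_free_segment() when the descriptor is freed, as
 * ol_tx_desc_free() does above.
 *
 *	struct qdf_tso_seg_elem_t *tso_seg;
 *
 *	tso_seg = ol_tso_alloc_segment(pdev);
 *	if (!tso_seg)
 *		return;			// pool exhausted: drop or fall back
 *	// ... fill tso_seg and attach it to the tx descriptor
 *	ol_tso_free_segment(pdev, tso_seg);
 */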
784#endif