blob: 96342bca7ce9a2288781278cb59f349d337588b7 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Ashish Kumar Dhanotiya94ffbd12019-08-08 18:00:59 +05302 * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080019/**
20 * @file ol_tx_desc.h
21 * @brief API definitions for the tx descriptor module within the data SW.
22 */
23#ifndef _OL_TX_DESC__H_
24#define _OL_TX_DESC__H_
25
Ashish Kumar Dhanotiya94ffbd12019-08-08 18:00:59 +053026#include "queue.h" /* TAILQ_HEAD */
Nirav Shahcbc6d722016-03-01 16:24:53 +053027#include <qdf_nbuf.h> /* qdf_nbuf_t */
Dhanashri Atre12a08392016-02-17 13:10:34 -080028#include <cdp_txrx_cmn.h> /* ol_txrx_vdev_t, etc. */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080029#include <ol_txrx_internal.h> /*TXRX_ASSERT2 */
Nirav Shah2e583a02016-04-30 14:06:12 +053030#include <ol_htt_tx_api.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080031
Nirav Shah76291962016-04-25 10:50:37 +053032#define DIV_BY_8 3
33#define DIV_BY_32 5
34#define MOD_BY_8 0x7
35#define MOD_BY_32 0x1F
36
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080037struct ol_tx_desc_t *
38ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
39 struct ol_txrx_vdev_t *vdev,
40 struct ol_txrx_msdu_info_t *msdu_info);
41
42
43/**
44 * @brief Allocate and initialize a tx descriptor for a LL system.
45 * @details
46 * Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor
47 * for private use within the host data SW, and a HTT tx descriptor for
48 * downloading tx meta-data to the target FW/HW.
49 * Fill in the fields of this pair of tx descriptors based on the
50 * information in the netbuf.
51 * For LL, this includes filling in a fragmentation descriptor to
52 * specify to the MAC HW where to find the tx frame's fragments.
53 *
54 * @param pdev - the data physical device sending the data
55 * (for accessing the tx desc pool)
56 * @param vdev - the virtual device sending the data
57 * (for specifying the transmitter address for multicast / broadcast data)
58 * @param netbuf - the tx frame
59 * @param msdu_info - tx meta-data
60 */
61struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
62 struct ol_txrx_vdev_t *vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +053063 qdf_nbuf_t netbuf,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080064 struct ol_txrx_msdu_info_t *msdu_info);
65
Siddarth Poddarb2011f62016-04-27 20:45:42 +053066
67/**
68 * @brief Allocate and initialize a tx descriptor for a HL system.
69 * @details
70 * Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor
71 * for private use within the host data SW, and a HTT tx descriptor for
72 * downloading tx meta-data to the target FW/HW.
73 * Fill in the fields of this pair of tx descriptors based on the
74 * information in the netbuf.
75 *
76 * @param pdev - the data physical device sending the data
77 * (for accessing the tx desc pool)
78 * @param vdev - the virtual device sending the data
79 * (for specifying the transmitter address for multicast / broadcast data)
80 * @param netbuf - the tx frame
81 * @param msdu_info - tx meta-data
82 */
83struct ol_tx_desc_t *
84ol_tx_desc_hl(
85 struct ol_txrx_pdev_t *pdev,
86 struct ol_txrx_vdev_t *vdev,
87 qdf_nbuf_t netbuf,
88 struct ol_txrx_msdu_info_t *msdu_info);
89
90
/**
 * @brief Use a tx descriptor ID to find the corresponding descriptor object.
 * @details
 * Descriptors live in an array of pages; the high bits of the ID select
 * the page and the low bits select the slot within that page.
 * No bounds checking is performed here - callers must validate the ID
 * first (or use ol_tx_desc_find_check(), which does).
 *
 * @param pdev - the data physical device sending the data
 * @param tx_desc_id - the ID of the descriptor in question
 * @return the descriptor object that has the specified ID
 */
static inline struct ol_tx_desc_t *ol_tx_desc_find(
	struct ol_txrx_pdev_t *pdev, uint16_t tx_desc_id)
{
	void **td_base = (void **)pdev->tx_desc.desc_pages.cacheable_pages;

	/*
	 * NOTE(review): the addition below is arithmetic on a void *
	 * (GCC extension, sizeof(void) == 1), so desc_reserved_size is a
	 * per-slot byte stride within the selected page.
	 */
	return &((union ol_tx_desc_list_elem_t *)
		(td_base[tx_desc_id >> pdev->tx_desc.page_divider] +
		(pdev->tx_desc.desc_reserved_size *
		(tx_desc_id & pdev->tx_desc.offset_filter))))->tx_desc;
}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800108
109/**
Jeff Johnson3dca2222018-05-12 15:10:43 -0700110 * @brief Use a tx descriptor ID to find the corresponding descriptor object
gbiane55c9562016-11-01 14:47:47 +0800111 * and add sanity check.
112 *
113 * @param pdev - the data physical device sending the data
114 * @param tx_desc_id - the ID of the descriptor in question
115 * @return the descriptor object that has the specified ID,
116 * if failure, will return NULL.
117 */
118
119#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
static inline struct ol_tx_desc_t *
ol_tx_desc_find_check(struct ol_txrx_pdev_t *pdev, u_int16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;

	/* reject out-of-range IDs before any pointer arithmetic */
	if (tx_desc_id >= pdev->tx_desc.pool_size)
		return NULL;

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);

	/*
	 * A descriptor whose frame was already freed is stale; a
	 * completion that references it is treated as invalid.
	 */
	if (tx_desc->pkt_type == ol_tx_frm_freed)
		return NULL;

	return tx_desc;
}
135
136#else
137
static inline struct ol_tx_desc_t *
ol_tx_desc_find_check(struct ol_txrx_pdev_t *pdev, u_int16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;

	/* reject out-of-range IDs before any pointer arithmetic */
	if (tx_desc_id >= pdev->tx_desc.pool_size)
		return NULL;

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);

	/* check against invalid tx_desc_id */
	if (ol_cfg_is_high_latency(pdev->ctrl_pdev) && !tx_desc->vdev)
		return NULL;

	return tx_desc;
}
154#endif
155
156/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800157 * @brief Free a list of tx descriptors and the tx frames they refer to.
158 * @details
159 * Free a batch of "standard" tx descriptors and their tx frames.
160 * Free each tx descriptor, by returning it to the freelist.
161 * Unmap each netbuf, and free the netbufs as a batch.
Jeff Johnson3dca2222018-05-12 15:10:43 -0700162 * Irregular tx frames like TSO or management frames that require
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800163 * special handling are processed by the ol_tx_desc_frame_free_nonstd
164 * function rather than this function.
165 *
166 * @param pdev - the data physical device that sent the data
167 * @param tx_descs - a list of SW tx descriptors for the tx frames
168 * @param had_error - bool indication of whether the transmission failed.
169 * This is provided to callback functions that get notified of
170 * the tx frame completion.
171 */
172void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
173 ol_tx_desc_list *tx_descs, int had_error);
174
175/**
176 * @brief Free a non-standard tx frame and its tx descriptor.
177 * @details
178 * Check the tx frame type (e.g. TSO vs. management) to determine what
179 * special steps, if any, need to be performed prior to freeing the
180 * tx frame and its tx descriptor.
181 * This function can also be used to free single standard tx frames.
182 * After performing any special steps based on tx frame type, free the
183 * tx descriptor, i.e. return it to the freelist, and unmap and
184 * free the netbuf referenced by the tx descriptor.
185 *
186 * @param pdev - the data physical device that sent the data
187 * @param tx_desc - the SW tx descriptor for the tx frame that was sent
188 * @param had_error - bool indication of whether the transmission failed.
189 * This is provided to callback functions that get notified of
190 * the tx frame completion.
191 */
192void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
193 struct ol_tx_desc_t *tx_desc, int had_error);
194
195/*
196 * @brief Determine the ID of a tx descriptor.
197 *
198 * @param pdev - the physical device that is sending the data
199 * @param tx_desc - the descriptor whose ID is being determined
200 * @return numeric ID that uniquely identifies the tx descriptor
201 */
static inline uint16_t
ol_tx_desc_id(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	/* IDs are assigned at pool init and must stay within the pool */
	TXRX_ASSERT2(tx_desc->id < pdev->tx_desc.pool_size);
	return tx_desc->id;
}
208
/*
 * @brief Retrieves the beacon header for the vdev
 * @param pdev - opaque pointer to scn
 * @param vdevid - vdev id
 * @return void pointer to the beacon header for the given vdev
 */
215
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800216void *ol_ath_get_bcn_header(struct cdp_cfg *cfg_pdev, A_UINT32 vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800217
218/*
219 * @brief Free a tx descriptor, without freeing the matching frame.
220 * @details
221 * This function is using during the function call that submits tx frames
222 * into the txrx layer, for cases where a tx descriptor is successfully
223 * allocated, but for other reasons the frame could not be accepted.
224 *
225 * @param pdev - the data physical device that is sending the data
226 * @param tx_desc - the descriptor being freed
227 */
228void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc);
229
230#if defined(FEATURE_TSO)
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530231struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800232
233void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530234 struct qdf_tso_seg_elem_t *tso_seg);
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +0530235struct qdf_tso_num_seg_elem_t *ol_tso_num_seg_alloc(
236 struct ol_txrx_pdev_t *pdev);
237void ol_tso_num_seg_free(struct ol_txrx_pdev_t *pdev,
238 struct qdf_tso_num_seg_elem_t *tso_num_seg);
Himanshu Agarwal0e90a7b2017-03-24 17:28:40 +0530239void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
Poddar, Siddarth39ba8e02017-03-03 16:46:39 +0530240 struct ol_txrx_msdu_info_t *msdu_info,
241 bool is_tso_seg_mapping_done);
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +0530242
Dhanashri Atre83d373d2015-07-28 16:45:59 -0700243#else
244#define ol_tso_alloc_segment(pdev) /*no-op*/
245#define ol_tso_free_segment(pdev, tso_seg) /*no-op*/
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +0530246#define ol_tso_num_seg_alloc(pdev) /*no-op*/
247#define ol_tso_num_seg_free(pdev, tso_num_seg) /*no-op*/
Poddar, Siddarth39ba8e02017-03-03 16:46:39 +0530248/*no-op*/
249#define ol_free_remaining_tso_segs(vdev, msdu_info, is_tso_seg_mapping_done)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800250#endif
251
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700252/**
253 * ol_tx_get_desc_global_pool() - get descriptor from global pool
254 * @pdev: pdev handler
255 *
256 * Caller needs to take lock and do sanity checks.
257 *
258 * Return: tx descriptor
259 */
260static inline
261struct ol_tx_desc_t *ol_tx_get_desc_global_pool(struct ol_txrx_pdev_t *pdev)
262{
263 struct ol_tx_desc_t *tx_desc = &pdev->tx_desc.freelist->tx_desc;
Yun Parkcb0bb182017-04-06 22:23:20 -0700264
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700265 pdev->tx_desc.freelist = pdev->tx_desc.freelist->next;
266 pdev->tx_desc.num_free--;
267 return tx_desc;
268}
269
270/**
271 * ol_tx_put_desc_global_pool() - put descriptor to global pool freelist
272 * @pdev: pdev handle
273 * @tx_desc: tx descriptor
274 *
275 * Caller needs to take lock and do sanity checks.
276 *
277 * Return: none
278 */
279static inline
280void ol_tx_put_desc_global_pool(struct ol_txrx_pdev_t *pdev,
281 struct ol_tx_desc_t *tx_desc)
282{
283 ((union ol_tx_desc_list_elem_t *)tx_desc)->next =
284 pdev->tx_desc.freelist;
285 pdev->tx_desc.freelist =
286 (union ol_tx_desc_list_elem_t *)tx_desc;
287 pdev->tx_desc.num_free++;
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700288}
289
290
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800291#ifdef QCA_LL_TX_FLOW_CONTROL_V2
Nirav Shaha3cc7192018-03-23 00:03:24 +0530292
293#ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE
294int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void);
295#else
static inline
int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void)
{
	/* flow-pool resize support compiled out: nothing to rebalance */
	return 0;
}
301#endif
302
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800303int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool);
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700304/**
305 * ol_tx_get_desc_flow_pool() - get descriptor from flow pool
306 * @pool: flow pool
307 *
308 * Caller needs to take lock and do sanity checks.
309 *
310 * Return: tx descriptor
311 */
312static inline
313struct ol_tx_desc_t *ol_tx_get_desc_flow_pool(struct ol_tx_flow_pool_t *pool)
314{
315 struct ol_tx_desc_t *tx_desc = &pool->freelist->tx_desc;
Yun Parkcb0bb182017-04-06 22:23:20 -0700316
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700317 pool->freelist = pool->freelist->next;
318 pool->avail_desc--;
319 return tx_desc;
320}
321
322/**
323 * ol_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
324 * @pool: flow pool
325 * @tx_desc: tx descriptor
326 *
327 * Caller needs to take lock and do sanity checks.
328 *
329 * Return: none
330 */
331static inline
332void ol_tx_put_desc_flow_pool(struct ol_tx_flow_pool_t *pool,
333 struct ol_tx_desc_t *tx_desc)
334{
335 tx_desc->pool = pool;
336 ((union ol_tx_desc_list_elem_t *)tx_desc)->next = pool->freelist;
337 pool->freelist = (union ol_tx_desc_list_elem_t *)tx_desc;
338 pool->avail_desc++;
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700339}
340
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800341#else
static inline int ol_tx_free_invalid_flow_pool(void *pool)
{
	/* flow control V2 compiled out: no invalid pools can exist */
	return 0;
}
346#endif
347
Nirav Shah76291962016-04-25 10:50:37 +0530348#ifdef DESC_DUP_DETECT_DEBUG
349/**
350 * ol_tx_desc_dup_detect_init() - initialize descriptor duplication logic
351 * @pdev: pdev handle
352 * @pool_size: global pool size
353 *
354 * Return: none
355 */
static inline
void ol_tx_desc_dup_detect_init(struct ol_txrx_pdev_t *pdev, uint16_t pool_size)
{
	/*
	 * One bit per descriptor.  Adding a full sizeof(*bitmap) rounds
	 * up past any partial final byte (slightly more than the exact
	 * (pool_size & MOD_BY_8) rounding used by the dump-size math in
	 * set/reset, which is harmless).
	 * Allocation failure is tolerated: set/reset bail out when the
	 * bitmap pointer is NULL, disabling duplicate detection.
	 */
	uint16_t size = (pool_size >> DIV_BY_8) +
			sizeof(*pdev->tx_desc.free_list_bitmap);
	pdev->tx_desc.free_list_bitmap = qdf_mem_malloc(size);
}
363
364/**
365 * ol_tx_desc_dup_detect_deinit() - deinit descriptor duplication logic
366 * @pdev: pdev handle
367 *
368 * Return: none
369 */
static inline
void ol_tx_desc_dup_detect_deinit(struct ol_txrx_pdev_t *pdev)
{
	/* log pool occupancy at teardown to help spot leaked descriptors */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: pool_size %d num_free %d\n", __func__,
		  pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
	/* bitmap may be NULL if the init-time allocation failed */
	if (pdev->tx_desc.free_list_bitmap)
		qdf_mem_free(pdev->tx_desc.free_list_bitmap);
}
379
380/**
381 * ol_tx_desc_dup_detect_set() - set bit for msdu_id
382 * @pdev: pdev handle
383 * @tx_desc: tx descriptor
384 *
385 * Return: none
386 */
387static inline
388void ol_tx_desc_dup_detect_set(struct ol_txrx_pdev_t *pdev,
389 struct ol_tx_desc_t *tx_desc)
390{
391 uint16_t msdu_id = ol_tx_desc_id(pdev, tx_desc);
Houston Hoffman088e4b92016-09-01 13:51:06 -0700392 bool test;
Nirav Shah76291962016-04-25 10:50:37 +0530393
394 if (!pdev->tx_desc.free_list_bitmap)
395 return;
396
Houston Hoffman088e4b92016-09-01 13:51:06 -0700397 if (qdf_unlikely(msdu_id > pdev->tx_desc.pool_size)) {
Nirav Shah7c8c1712018-09-10 16:01:31 +0530398 qdf_print("msdu_id %d > pool_size %d",
399 msdu_id, pdev->tx_desc.pool_size);
Houston Hoffman088e4b92016-09-01 13:51:06 -0700400 QDF_BUG(0);
401 }
402
403 test = test_and_set_bit(msdu_id, pdev->tx_desc.free_list_bitmap);
404 if (qdf_unlikely(test)) {
Nirav Shah76291962016-04-25 10:50:37 +0530405 uint16_t size = (pdev->tx_desc.pool_size >> DIV_BY_8) +
406 ((pdev->tx_desc.pool_size & MOD_BY_8) ? 1 : 0);
Nirav Shah7c8c1712018-09-10 16:01:31 +0530407 qdf_print("duplicate msdu_id %d detected!!", msdu_id);
Nirav Shah76291962016-04-25 10:50:37 +0530408 qdf_trace_hex_dump(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
409 (void *)pdev->tx_desc.free_list_bitmap, size);
410 QDF_BUG(0);
411 }
Nirav Shah76291962016-04-25 10:50:37 +0530412}
413
414/**
415 * ol_tx_desc_dup_detect_reset() - reset bit for msdu_id
416 * @pdev: pdev handle
417 * @tx_desc: tx descriptor
418 *
419 * Return: none
420 */
421static inline
422void ol_tx_desc_dup_detect_reset(struct ol_txrx_pdev_t *pdev,
423 struct ol_tx_desc_t *tx_desc)
424{
425 uint16_t msdu_id = ol_tx_desc_id(pdev, tx_desc);
Houston Hoffman088e4b92016-09-01 13:51:06 -0700426 bool test;
Nirav Shah76291962016-04-25 10:50:37 +0530427
428 if (!pdev->tx_desc.free_list_bitmap)
429 return;
430
Houston Hoffman088e4b92016-09-01 13:51:06 -0700431 if (qdf_unlikely(msdu_id > pdev->tx_desc.pool_size)) {
Nirav Shah7c8c1712018-09-10 16:01:31 +0530432 qdf_print("msdu_id %d > pool_size %d",
433 msdu_id, pdev->tx_desc.pool_size);
Houston Hoffman088e4b92016-09-01 13:51:06 -0700434 QDF_BUG(0);
435 }
436
437 test = !test_and_clear_bit(msdu_id, pdev->tx_desc.free_list_bitmap);
438 if (qdf_unlikely(test)) {
Nirav Shah76291962016-04-25 10:50:37 +0530439 uint16_t size = (pdev->tx_desc.pool_size >> DIV_BY_8) +
440 ((pdev->tx_desc.pool_size & MOD_BY_8) ? 1 : 0);
441 qdf_print("duplicate free msg received for msdu_id %d!!\n",
442 msdu_id);
443 qdf_trace_hex_dump(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
444 (void *)pdev->tx_desc.free_list_bitmap, size);
445 QDF_BUG(0);
446 }
Nirav Shah76291962016-04-25 10:50:37 +0530447}
448#else
static inline
void ol_tx_desc_dup_detect_init(struct ol_txrx_pdev_t *pdev, uint16_t size)
{
	/* DESC_DUP_DETECT_DEBUG disabled: no-op */
}
453
static inline
void ol_tx_desc_dup_detect_deinit(struct ol_txrx_pdev_t *pdev)
{
	/* DESC_DUP_DETECT_DEBUG disabled: no-op */
}
458
static inline
void ol_tx_desc_dup_detect_set(struct ol_txrx_pdev_t *pdev,
			       struct ol_tx_desc_t *tx_desc)
{
	/* DESC_DUP_DETECT_DEBUG disabled: no-op */
}
464
static inline
void ol_tx_desc_dup_detect_reset(struct ol_txrx_pdev_t *pdev,
				 struct ol_tx_desc_t *tx_desc)
{
	/* DESC_DUP_DETECT_DEBUG disabled: no-op */
}
470#endif
471
Nirav Shah2e583a02016-04-30 14:06:12 +0530472enum extension_header_type
473ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev,
474 qdf_nbuf_t netbuf);
475enum extension_header_type
476ol_tx_get_wisa_ext_type(qdf_nbuf_t netbuf);
477
478
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800479#endif /* _OL_TX_DESC__H_ */