qcacld-3.0: Update driver to use QDF NBUF APIs(1/2)
Update driver to use QDF NBUF APIs
Change-Id: I4409b6c046de1221b57baed45088d5f3b898b565
CRs-Fixed: 981188
diff --git a/core/cdf/inc/cdf_nbuf.h b/core/cdf/inc/cdf_nbuf.h
deleted file mode 100644
index 01771c6..0000000
--- a/core/cdf/inc/cdf_nbuf.h
+++ /dev/null
@@ -1,1125 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: cdf_nbuf_public network buffer API
- * This file defines the network buffer abstraction.
- */
-
-#ifndef _CDF_NBUF_H
-#define _CDF_NBUF_H
-#include <qdf_util.h>
-#include <qdf_types.h>
-#include <qdf_net_types.h>
-#include <qdf_lock.h>
-#include <i_cdf_nbuf.h>
-#include <asm/cacheflush.h>
-
-#define IPA_NBUF_OWNER_ID 0xaa55aa55
-#define NBUF_PKT_TRAC_TYPE_EAPOL 0x02
-#define NBUF_PKT_TRAC_TYPE_DHCP 0x04
-#define NBUF_PKT_TRAC_TYPE_MGMT_ACTION 0x08
-#define NBUF_PKT_TRAC_MAX_STRING 12
-#define NBUF_PKT_TRAC_PROTO_STRING 4
-#define NBUF_PKT_ERROR 1
-
-/* Tracked Packet types */
-#define NBUF_TX_PKT_INVALID 0
-#define NBUF_TX_PKT_DATA_TRACK 1
-#define NBUF_TX_PKT_MGMT_TRACK 2
-
-/* Different Packet states */
-#define NBUF_TX_PKT_HDD 1
-#define NBUF_TX_PKT_TXRX_ENQUEUE 2
-#define NBUF_TX_PKT_TXRX_DEQUEUE 3
-#define NBUF_TX_PKT_TXRX 4
-#define NBUF_TX_PKT_HTT 5
-#define NBUF_TX_PKT_HTC 6
-#define NBUF_TX_PKT_HIF 7
-#define NBUF_TX_PKT_CE 8
-#define NBUF_TX_PKT_FREE 9
-#define NBUF_TX_PKT_STATE_MAX 10
-
-
-/**
- * @cdf_nbuf_t - Platform indepedent packet abstraction
- */
-typedef __cdf_nbuf_t cdf_nbuf_t;
-
-/**
- * @cdf_dma_map_cb_t - Dma map callback prototype
- */
-typedef void (*cdf_dma_map_cb_t)(void *arg, cdf_nbuf_t buf,
- qdf_dma_map_t dmap);
-
-/**
- * @__CDF_NBUF_NULL - invalid handle
- */
-#define CDF_NBUF_NULL __CDF_NBUF_NULL
-/**
- * @cdf_nbuf_queue_t - Platform independent packet queue abstraction
- */
-typedef __cdf_nbuf_queue_t cdf_nbuf_queue_t;
-
-/* BUS/DMA mapping routines */
-
-/**
- * cdf_nbuf_map() - map a buffer to local bus address space
- * @osdev: OS device
- * @buf: Buf to be mapped (mapping info is stored in the buf's meta-data area)
- * @dir: DMA direction
- *
- * Return: Status of the operation
- */
-static inline QDF_STATUS
-cdf_nbuf_map(qdf_device_t osdev, cdf_nbuf_t buf, qdf_dma_dir_t dir)
-{
- return __cdf_nbuf_map(osdev, buf, dir);
-}
-
-/**
- * cdf_nbuf_unmap() - unmap a previously mapped buf
- * @osdev: OS device
- * @buf: Buf to be unmapped (mapping info is stored in the buf's meta-data area)
- * @dir: DMA direction
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_unmap(qdf_device_t osdev, cdf_nbuf_t buf, qdf_dma_dir_t dir)
-{
- __cdf_nbuf_unmap(osdev, buf, dir);
-}
-
-/**
- * cdf_nbuf_map_single() - map a single buffer to local bus address space
- * @osdev: OS device
- * @buf: Buf to be mapped (mapping info is stored in the buf's meta-data area)
- * @dir: DMA direction
- *
- * Return: Status of the operation
- */
-static inline QDF_STATUS
-cdf_nbuf_map_single(qdf_device_t osdev, cdf_nbuf_t buf, qdf_dma_dir_t dir)
-{
- return __cdf_nbuf_map_single(osdev, buf, dir);
-}
-
-/**
- * cdf_nbuf_unmap_single() - unmap a previously mapped buf
- * @osdev: OS device
- * @buf: Buf to be unmapped (mapping info is stored in the buf's meta-data area)
- * @dir: DMA direction
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_unmap_single(qdf_device_t osdev, cdf_nbuf_t buf, qdf_dma_dir_t dir)
-{
- __cdf_nbuf_unmap_single(osdev, buf, dir);
-}
-
-/**
- * cdf_nbuf_get_num_frags() - get number of fragments
- * @buf: Network buffer
- *
- * Return: Number of fragments
- */
-static inline int cdf_nbuf_get_num_frags(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_get_num_frags(buf);
-}
-
-/**
- * cdf_nbuf_get_frag_len() - get fragment length
- * @buf: Network buffer
- * @frag_num: Fragment number
- *
- * Return: Fragment length
- */
-static inline int cdf_nbuf_get_frag_len(cdf_nbuf_t buf, int frag_num)
-{
- BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
- return __cdf_nbuf_get_frag_len(buf, frag_num);
-}
-
-/**
- * cdf_nbuf_get_frag_vaddr() - get fragment virtual address
- * @buf: Network buffer
- * @frag_num: Fragment number
- *
- * Return: Fragment virtual address
- */
-static inline unsigned char *cdf_nbuf_get_frag_vaddr(cdf_nbuf_t buf,
- int frag_num)
-{
- BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
- return __cdf_nbuf_get_frag_vaddr(buf, frag_num);
-}
-
-/**
- * cdf_nbuf_get_frag_paddr() - get fragment physical address
- * @buf: Network buffer
- * @frag_num: Fragment number
- *
- * Return: Fragment physical address
- */
-static inline qdf_dma_addr_t cdf_nbuf_get_frag_paddr(cdf_nbuf_t buf, int frag_num)
-{
- BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
- return __cdf_nbuf_get_frag_paddr(buf, frag_num);
-}
-
-/**
- * cdf_nbuf_get_frag_is_wordstream() - is fragment wordstream
- * @buf: Network buffer
- * @frag_num: Fragment number
- *
- * Return: Fragment wordstream or not
- */
-static inline int cdf_nbuf_get_frag_is_wordstream(cdf_nbuf_t buf, int frag_num)
-{
- BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
- return __cdf_nbuf_get_frag_is_wordstream(buf, frag_num);
-}
-
-/**
- * cdf_nbuf_set_frag_is_wordstream() - set fragment wordstream
- * @buf: Network buffer
- * @frag_num: Fragment number
- * @is_wordstream: Wordstream
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_set_frag_is_wordstream(cdf_nbuf_t buf, int frag_num, int is_wordstream)
-{
- BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
- __cdf_nbuf_set_frag_is_wordstream(buf, frag_num, is_wordstream);
-}
-
-/**
- * cdf_nbuf_ipa_owned_get - gets the ipa_owned flag
- * @buf: Network buffer
- *
- * Return: none
- */
-static inline int cdf_nbuf_ipa_owned_get(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_ipa_owned_get(buf);
-}
-
-/**
- * cdf_nbuf_ipa_owned_set - sets the ipa_owned flag
- * @buf: Network buffer
- *
- * Return: none
- */
-static inline void cdf_nbuf_ipa_owned_set(cdf_nbuf_t buf)
-{
- __cdf_nbuf_ipa_owned_set(buf);
-}
-
-/**
- * cdf_nbuf_ipa_priv_get - gets the ipa_priv field
- * @buf: Network buffer
- *
- * Return: none
- */
-static inline int cdf_nbuf_ipa_priv_get(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_ipa_priv_get(buf);
-}
-
-/**
- * cdf_nbuf_ipa_priv_set - sets the ipa_priv field
- * @buf: Network buffer
- *
- * Return: none
- */
-static inline void cdf_nbuf_ipa_priv_set(cdf_nbuf_t buf, uint32_t priv)
-{
- BUG_ON(priv & 0x80000000); /* priv is 31 bits only */
- __cdf_nbuf_ipa_priv_set(buf, priv);
-}
-
-/**
- * cdf_nbuf_mapped_paddr_get - gets the paddr of nbuf->data
- * @buf: Network buffer
- *
- * Return: none
- */
-static inline qdf_dma_addr_t
-cdf_nbuf_mapped_paddr_get(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_mapped_paddr_get(buf);
-}
-
-/**
- * cdf_nbuf_mapped_paddr_set - sets the paddr of nbuf->data
- * @buf: Network buffer
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_mapped_paddr_set(cdf_nbuf_t buf, qdf_dma_addr_t paddr)
-{
- __cdf_nbuf_mapped_paddr_set(buf, paddr);
-}
-
-/**
- * cdf_nbuf_frag_push_head() - push fragment head
- * @buf: Network buffer
- * @frag_len: Fragment length
- * @frag_vaddr: Fragment virtual address
- * @frag_paddr_lo: Fragment physical address lo
- * @frag_paddr_hi: Fragment physical address hi
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_frag_push_head(cdf_nbuf_t buf,
- int frag_len,
- char *frag_vaddr,
- qdf_dma_addr_t frag_paddr)
-{
- __cdf_nbuf_frag_push_head(buf, frag_len, frag_vaddr, frag_paddr);
-}
-
-#ifdef MEMORY_DEBUG
-void cdf_net_buf_debug_init(void);
-void cdf_net_buf_debug_exit(void);
-void cdf_net_buf_debug_clean(void);
-void cdf_net_buf_debug_add_node(cdf_nbuf_t net_buf, size_t size,
- uint8_t *file_name, uint32_t line_num);
-void cdf_net_buf_debug_delete_node(cdf_nbuf_t net_buf);
-void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf);
-
-/* nbuf allocation rouines */
-
-/**
- * cdf_nbuf_alloc() - Allocate cdf_nbuf
- * @hdl: Platform device object
- * @size: Data buffer size for this cdf_nbuf including max header
- * size
- * @reserve: Headroom to start with.
- * @align: Alignment for the start buffer.
- * @prio: Indicate if the nbuf is high priority (some OSes e.g darwin
- * polls few times if allocation fails and priority is true)
- *
- * The nbuf created is guarenteed to have only 1 physical segment
- *
- * Return: The new cdf_nbuf instance or NULL if there's not enough memory.
- */
-
-#define cdf_nbuf_alloc(d, s, r, a, p) \
- cdf_nbuf_alloc_debug(d, s, r, a, p, __FILE__, __LINE__)
-static inline cdf_nbuf_t
-cdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size, int reserve,
- int align, int prio, uint8_t *file_name,
- uint32_t line_num)
-{
- cdf_nbuf_t net_buf;
- net_buf = __cdf_nbuf_alloc(osdev, size, reserve, align, prio);
-
- /* Store SKB in internal CDF tracking table */
- if (qdf_likely(net_buf))
- cdf_net_buf_debug_add_node(net_buf, size, file_name, line_num);
-
- return net_buf;
-}
-
-/**
- * cdf_nbuf_free() - free cdf_nbuf
- * @net_buf: Network buffer to free
- *
- * Return: none
- */
-static inline void cdf_nbuf_free(cdf_nbuf_t net_buf)
-{
- /* Remove SKB from internal CDF tracking table */
- if (qdf_likely(net_buf))
- cdf_net_buf_debug_delete_node(net_buf);
-
- __cdf_nbuf_free(net_buf);
-}
-
-#else
-
-static inline void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf)
-{
- return;
-}
-
-/* Nbuf allocation rouines */
-
-/**
- * cdf_nbuf_alloc() - allocate cdf_nbuf
- * @hdl: Platform device object
- * @size: Data buffer size for this cdf_nbuf including max header
- * size
- * @reserve: Headroom to start with.
- * @align: Alignment for the start buffer.
- * @prio: Indicate if the nbuf is high priority (some OSes e.g darwin
- * polls few times if allocation fails and priority is true)
- *
- * The nbuf created is guarenteed to have only 1 physical segment
- *
- * Return: new cdf_nbuf instance or NULL if there's not enough memory.
- */
-static inline cdf_nbuf_t
-cdf_nbuf_alloc(qdf_device_t osdev,
- qdf_size_t size, int reserve, int align, int prio)
-{
- return __cdf_nbuf_alloc(osdev, size, reserve, align, prio);
-}
-
-/**
- * cdf_nbuf_free() - free cdf_nbuf
- * @buf: Network buffer to free
- *
- * Return: none
- */
-static inline void cdf_nbuf_free(cdf_nbuf_t buf)
-{
- __cdf_nbuf_free(buf);
-}
-
-#endif
-
-/**
- * cdf_nbuf_tx_free() - free a list of cdf_nbufs and tell the OS their tx
- * status (if req'd)
- * @bufs: List of netbufs to free
- * @tx_err: Whether the tx frames were transmitted successfully
- *
- * Return: none
- */
-static inline void cdf_nbuf_tx_free(cdf_nbuf_t buf_list, int tx_err)
-{
- __cdf_nbuf_tx_free(buf_list, tx_err);
-}
-
-/**
- * cdf_nbuf_copy() - copy src buffer into dst.
- * @buf: source nbuf to copy from
- *
- * This API is useful, for example, because most native buffer provide a way to
- * copy a chain into a single buffer. Therefore as a side effect, it also
- * "linearizes" a buffer (which is perhaps why you'll use it mostly). It
- * creates a writeable copy.
- *
- *
- * Return: new nbuf
- */
-static inline cdf_nbuf_t cdf_nbuf_copy(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_copy(buf);
-}
-
-/**
- * cdf_nbuf_cat() - link two nbufs, the new buf is piggybacked into older one
- * @dst: Buffer to piggyback into
- * @src: Buffer to put
- *
- * Return: Status of the call - 0 successful
- */
-static inline QDF_STATUS cdf_nbuf_cat(cdf_nbuf_t dst, cdf_nbuf_t src)
-{
- return __cdf_nbuf_cat(dst, src);
-}
-
-/**
- * @cdf_nbuf_copy_bits() - return the length of the copy bits for skb
- * @skb: SKB pointer
- * @offset: offset
- * @len: Length
- * @to: To
- *
- * Return: int32_t
- */
-static inline int32_t
-cdf_nbuf_copy_bits(cdf_nbuf_t nbuf, uint32_t offset, uint32_t len, void *to)
-{
- return __cdf_nbuf_copy_bits(nbuf, offset, len, to);
-}
-
-/**
- * cdf_nbuf_clone() - clone the nbuf (copy is readonly)
- * @buf: nbuf to clone from
- *
- * Return: cloned buffer
- */
-static inline cdf_nbuf_t cdf_nbuf_clone(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_clone(buf);
-}
-
-/* nbuf manipulation routines */
-
-/**
- * @cdf_nbuf_head() - return the address of an nbuf's buffer
- * @buf: netbuf
- *
- * Return: head address
- */
-static inline uint8_t *cdf_nbuf_head(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_head(buf);
-}
-
-/**
- * cdf_nbuf_data() - Return the address of the start of data within an nbuf
- * @buf: Network buffer
- *
- * Return: Data address
- */
-static inline uint8_t *cdf_nbuf_data(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_data(buf);
-}
-
-/**
- * cdf_nbuf_headroom() - amount of headroom int the current nbuf
- * @buf: Network buffer
- *
- * Return: Amount of head room
- */
-static inline uint32_t cdf_nbuf_headroom(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_headroom(buf);
-}
-
-/**
- * cdf_nbuf_tailroom() - amount of tail space available
- * @buf: Network buffer
- *
- * Return: amount of tail room
- */
-static inline uint32_t cdf_nbuf_tailroom(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_tailroom(buf);
-}
-
-/**
- * cdf_nbuf_push_head() - push data in the front
- * @buf: Network buf instance
- * @size: Size to be pushed
- *
- * Return: New data pointer of this buf after data has been pushed,
- * or NULL if there is not enough room in this buf.
- */
-static inline uint8_t *cdf_nbuf_push_head(cdf_nbuf_t buf, qdf_size_t size)
-{
- return __cdf_nbuf_push_head(buf, size);
-}
-
-/**
- * cdf_nbuf_put_tail() - puts data in the end
- * @buf: Network buf instance
- * @size: Size to be pushed
- *
- * Return: Data pointer of this buf where new data has to be
- * put, or NULL if there is not enough room in this buf.
- */
-static inline uint8_t *cdf_nbuf_put_tail(cdf_nbuf_t buf, qdf_size_t size)
-{
- return __cdf_nbuf_put_tail(buf, size);
-}
-
-/**
- * cdf_nbuf_pull_head() - pull data out from the front
- * @buf: Network buf instance
- * @size: Size to be popped
- *
- * Return: New data pointer of this buf after data has been popped,
- * or NULL if there is not sufficient data to pull.
- */
-static inline uint8_t *cdf_nbuf_pull_head(cdf_nbuf_t buf, qdf_size_t size)
-{
- return __cdf_nbuf_pull_head(buf, size);
-}
-
-/**
- * cdf_nbuf_trim_tail() - trim data out from the end
- * @buf: Network buf instance
- * @size: Size to be popped
- *
- * Return: none
- */
-static inline void cdf_nbuf_trim_tail(cdf_nbuf_t buf, qdf_size_t size)
-{
- __cdf_nbuf_trim_tail(buf, size);
-}
-
-/**
- * cdf_nbuf_len() - get the length of the buf
- * @buf: Network buf instance
- *
- * Return: total length of this buf.
- */
-static inline qdf_size_t cdf_nbuf_len(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_len(buf);
-}
-
-/**
- * cdf_nbuf_set_pktlen() - set the length of the buf
- * @buf: Network buf instance
- * @size: Size to be set
- *
- * Return: none
- */
-static inline void cdf_nbuf_set_pktlen(cdf_nbuf_t buf, uint32_t len)
-{
- __cdf_nbuf_set_pktlen(buf, len);
-}
-
-/**
- * cdf_nbuf_reserve() - trim data out from the end
- * @buf: Network buf instance
- * @size: Size to be popped
- *
- * Return: none
- */
-static inline void cdf_nbuf_reserve(cdf_nbuf_t buf, qdf_size_t size)
-{
- __cdf_nbuf_reserve(buf, size);
-}
-
-/**
- * cdf_nbuf_peek_header() - return the data pointer & length of the header
- * @buf: Network nbuf
- * @addr: Data pointer
- * @len: Length of the data
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_peek_header(cdf_nbuf_t buf, uint8_t **addr, uint32_t *len)
-{
- __cdf_nbuf_peek_header(buf, addr, len);
-}
-
-/* nbuf private context routines */
-
-/* nbuf queue routines */
-
-/**
- * cdf_nbuf_queue_init() - initialize buf queue
- * @head: Network buf queue head
- *
- * Return: none
- */
-static inline void cdf_nbuf_queue_init(cdf_nbuf_queue_t *head)
-{
- __cdf_nbuf_queue_init(head);
-}
-
-/**
- * cdf_nbuf_queue_add() - append a nbuf to the tail of the buf queue
- * @head: Network buf queue head
- * @buf: Network buf
- *
- * Return: none
- */
-static inline void cdf_nbuf_queue_add(cdf_nbuf_queue_t *head, cdf_nbuf_t buf)
-{
- __cdf_nbuf_queue_add(head, buf);
-}
-
-/**
- * cdf_nbuf_queue_insert_head() - insert nbuf at the head of queue
- * @head: Network buf queue head
- * @buf: Network buf
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_queue_insert_head(cdf_nbuf_queue_t *head, cdf_nbuf_t buf)
-{
- __cdf_nbuf_queue_insert_head(head, buf);
-}
-
-/**
- * cdf_nbuf_queue_remove() - retrieve a buf from the head of the buf queue
- * @head: Network buf queue head
- *
- * Return: The head buf in the buf queue.
- */
-static inline cdf_nbuf_t cdf_nbuf_queue_remove(cdf_nbuf_queue_t *head)
-{
- return __cdf_nbuf_queue_remove(head);
-}
-
-/**
- * cdf_nbuf_queue_len() - get the length of the queue
- * @head: Network buf queue head
- *
- * Return: length of the queue
- */
-static inline uint32_t cdf_nbuf_queue_len(cdf_nbuf_queue_t *head)
-{
- return __cdf_nbuf_queue_len(head);
-}
-
-/**
- * cdf_nbuf_queue_next() - get the next guy/packet of the given buffer
- * @buf: Network buffer
- *
- * Return: next buffer/packet
- */
-static inline cdf_nbuf_t cdf_nbuf_queue_next(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_queue_next(buf);
-}
-
-/**
- * @cdf_nbuf_is_queue_empty() - check if the buf queue is empty
- * @nbq: Network buf queue handle
- *
- * Return: true if queue is empty
- * false if queue is not emty
- */
-static inline bool cdf_nbuf_is_queue_empty(cdf_nbuf_queue_t *nbq)
-{
- return __cdf_nbuf_is_queue_empty(nbq);
-}
-
-/**
- * cdf_nbuf_next() - get the next packet in the linked list
- * @buf: Network buffer
- *
- * This function can be used when nbufs are directly linked into a list,
- * rather than using a separate network buffer queue object.
- *
- * Return: next network buffer in the linked list
- */
-static inline cdf_nbuf_t cdf_nbuf_next(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_next(buf);
-}
-
-/**
- * cdf_nbuf_get_protocol() - return the protocol value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: skb protocol
- */
-static inline uint16_t cdf_nbuf_get_protocol(struct sk_buff *skb)
-{
- return __cdf_nbuf_get_protocol(skb);
-}
-
-/**
- * cdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: skb ip_summed
- */
-static inline uint8_t cdf_nbuf_get_ip_summed(struct sk_buff *skb)
-{
- return __cdf_nbuf_get_ip_summed(skb);
-}
-
-/**
- * cdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
- * @skb: Pointer to network buffer
- * @ip_summed: ip checksum
- *
- * Return: none
- */
-static inline void cdf_nbuf_set_ip_summed(struct sk_buff *skb, uint8_t ip_summed)
-{
- __cdf_nbuf_set_ip_summed(skb, ip_summed);
-}
-
-/**
- * cdf_nbuf_set_next() - add a packet to a linked list
- * @this_buf: Predecessor buffer
- * @next_buf: Successor buffer
- *
- * This function can be used to directly link nbufs, rather than using
- * a separate network buffer queue object.
- *
- * Return: none
- */
-static inline void cdf_nbuf_set_next(cdf_nbuf_t this_buf, cdf_nbuf_t next_buf)
-{
- __cdf_nbuf_set_next(this_buf, next_buf);
-}
-
-/* nbuf extension routines */
-
-/**
- * cdf_nbuf_set_next_ext() - link extension of this packet contained in a new
- * nbuf
- * @this_buf: predecessor buffer
- * @next_buf: successor buffer
- *
- * This function is used to link up many nbufs containing a single logical
- * packet - not a collection of packets. Do not use for linking the first
- * extension to the head
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_set_next_ext(cdf_nbuf_t this_buf, cdf_nbuf_t next_buf)
-{
- __cdf_nbuf_set_next_ext(this_buf, next_buf);
-}
-
-/**
- * cdf_nbuf_next_ext() - get the next packet extension in the linked list
- * @buf: Network buffer
- *
- * Return: Next network buffer in the linked list
- */
-static inline cdf_nbuf_t cdf_nbuf_next_ext(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_next_ext(buf);
-}
-
-/**
- * cdf_nbuf_append_ext_list() - link list of packet extensions to the head
- * segment
- * @head_buf: Network buf holding head segment (single)
- * @ext_list: Network buf list holding linked extensions to the head
- * @ext_len: Total length of all buffers in the extension list
- *
- * This function is used to link up a list of packet extensions (seg1, 2,
- * ...) to the nbuf holding the head segment (seg0)
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_append_ext_list(cdf_nbuf_t head_buf, cdf_nbuf_t ext_list,
- qdf_size_t ext_len)
-{
- __cdf_nbuf_append_ext_list(head_buf, ext_list, ext_len);
-}
-
-/**
- * cdf_nbuf_get_tx_cksum() - gets the tx checksum offload demand
- * @buf: Network buffer
- *
- * Return: qdf_nbuf_tx_cksum_t checksum offload demand for the frame
- */
-static inline qdf_nbuf_tx_cksum_t cdf_nbuf_get_tx_cksum(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_get_tx_cksum(buf);
-}
-
-/**
- * cdf_nbuf_set_rx_cksum() - drivers that support hw checksumming use this to
- * indicate checksum info to the stack.
- * @buf: Network buffer
- * @cksum: Checksum
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_set_rx_cksum(cdf_nbuf_t buf, qdf_nbuf_rx_cksum_t *cksum)
-{
- __cdf_nbuf_set_rx_cksum(buf, cksum);
-}
-
-/**
- * cdf_nbuf_get_tid() - this function extracts the TID value from nbuf
- * @buf: Network buffer
- *
- * Return: TID value
- */
-static inline uint8_t cdf_nbuf_get_tid(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_get_tid(buf);
-}
-
-/**
- * cdf_nbuf_set_tid() - this function sets the TID value in nbuf
- * @buf: Network buffer
- * @tid: TID value
- *
- * Return: none
- */
-static inline void cdf_nbuf_set_tid(cdf_nbuf_t buf, uint8_t tid)
-{
- __cdf_nbuf_set_tid(buf, tid);
-}
-
-/**
- * cdf_nbuf_get_exemption_type() - this function extracts the exemption type
- * from nbuf
- * @buf: Network buffer
- *
- * Return: Exemption type
- */
-static inline uint8_t cdf_nbuf_get_exemption_type(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_get_exemption_type(buf);
-}
-
-/**
- * cdf_nbuf_set_protocol() - this function peeks data into the buffer at given
- * offset
- * @buf: Network buffer
- * @proto: Protocol
- *
- * Return: none
- */
-static inline void cdf_nbuf_set_protocol(cdf_nbuf_t buf, uint16_t proto)
-{
- __cdf_nbuf_set_protocol(buf, proto);
-}
-
-/**
- * cdf_nbuf_trace_get_proto_type() - this function return packet proto type
- * @buf: Network buffer
- *
- * Return: Packet protocol type
- */
-static inline uint8_t cdf_nbuf_trace_get_proto_type(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_trace_get_proto_type(buf);
-}
-
-#ifdef QCA_PKT_PROTO_TRACE
-/**
- * cdf_nbuf_trace_set_proto_type() - this function updates packet proto type
- * @buf: Network buffer
- * @proto_type: Protocol type
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_trace_set_proto_type(cdf_nbuf_t buf, uint8_t proto_type)
-{
- __cdf_nbuf_trace_set_proto_type(buf, proto_type);
-}
-#else
-#define cdf_nbuf_trace_set_proto_type(buf, proto_type) /*NO OP*/
-#endif
-
-/**
- * cdf_nbuf_reg_trace_cb() - this function registers protocol trace callback
- * @cb_func_ptr: Callback pointer
- *
- * Return: none
- */
-static inline void cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr)
-{
- __cdf_nbuf_reg_trace_cb(cb_func_ptr);
-}
-
-/**
- * cdf_nbuf_trace_update() - this function updates protocol event
- * @buf: Network buffer
- * @event_string: Event string pointer
- *
- * Return: none
- */
-static inline void cdf_nbuf_trace_update(cdf_nbuf_t buf, char *event_string)
-{
- __cdf_nbuf_trace_update(buf, event_string);
-}
-
-/**
- * cdf_nbuf_set_tx_parallel_dnload_frm() - set tx parallel download
- * @buf: Network buffer
- * @candi: Candidate of parallel download frame
- *
- * This function stores a flag specifying this TX frame is suitable for
- * downloading though a 2nd TX data pipe that is used for short frames for
- * protocols that can accept out-of-order delivery.
- *
- * Return: none
- */
-static inline void
-cdf_nbuf_set_tx_parallel_dnload_frm(cdf_nbuf_t buf, uint8_t candi)
-{
- __cdf_nbuf_set_tx_htt2_frm(buf, candi);
-}
-
-/**
- * cdf_nbuf_get_tx_parallel_dnload_frm() - get tx parallel download
- * @buf: Network buffer
- *
- * This function return whether this TX frame is allow to download though a 2nd
- * TX data pipe or not.
- *
- * Return: none
- */
-static inline uint8_t cdf_nbuf_get_tx_parallel_dnload_frm(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_get_tx_htt2_frm(buf);
-}
-
-/**
- * cdf_invalidate_range() - invalidate the virtual address range specified by
- * start and end addresses.
- * Note: This does not write back the cache entries.
- *
- * Return: none
- */
-
-#ifdef MSM_PLATFORM
-static inline void cdf_invalidate_range(void *start, void *end)
-{
- dmac_inv_range(start, end);
-}
-#else
-static inline void cdf_invalidate_range(void *start, void *end)
-{
-}
-#endif
-
-#if defined(FEATURE_TSO)
-/**
- * cdf_nbuf_reset_num_frags() - resets the number of frags to 0 (valid range: 0..1)
- * @buf: Network buffer
- *
- * Return: Number of fragments
- */
-static inline int cdf_nbuf_reset_num_frags(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_reset_num_frags(buf);
-}
-
-/**
- * cdf_nbuf_is_tso() - is the network buffer a jumbo packet?
- * @buf: Network buffer
- *
- * Return: true - jumbo packet false - not a jumbo packet
- */
-static inline bool cdf_nbuf_is_tso(cdf_nbuf_t nbuf)
-{
- return __cdf_nbuf_is_tso(nbuf);
-}
-
-/**
- * cdf_nbuf_get_tso_info() - function to divide a jumbo TSO
- * network buffer into segments
- * @nbuf: network buffer to be segmented
- * @tso_info: This is the output. The information about the
- * TSO segments will be populated within this.
- *
- * This function fragments a TCP jumbo packet into smaller
- * segments to be transmitted by the driver. It chains the TSO
- * segments created into a list.
- *
- * Return: number of TSO segments
- */
-static inline uint32_t cdf_nbuf_get_tso_info(qdf_device_t osdev,
- cdf_nbuf_t nbuf, struct qdf_tso_info_t *tso_info)
-{
- return __cdf_nbuf_get_tso_info(osdev, nbuf, tso_info);
-}
-
-/**
- * cdf_nbuf_get_tso_num_seg() - function to calculate the number
- * of TCP segments within the TSO jumbo packet
- * @nbuf: TSO jumbo network buffer to be segmented
- *
- * This function calculates the number of TCP segments that the
- network buffer can be divided into.
- *
- * Return: number of TCP segments
- */
-static inline uint32_t cdf_nbuf_get_tso_num_seg(cdf_nbuf_t nbuf)
-{
- return __cdf_nbuf_get_tso_num_seg(nbuf);
-}
-
-/**
- * cdf_nbuf_inc_users() - function to increment the number of
- * users referencing this network buffer
- *
- * @nbuf: network buffer
- *
- * This function increments the number of users referencing this
- * network buffer
- *
- * Return: the network buffer
- */
-static inline void cdf_nbuf_inc_users(cdf_nbuf_t nbuf)
-{
- __cdf_nbuf_inc_users(nbuf);
-}
-#endif /*TSO*/
-
-/**
- * cdf_nbuf_data_attr_get() - Get data_attr field from cvg_nbuf_cb
- *
- * @nbuf: Network buffer (skb on linux)
- *
- * This function returns the values of data_attr field
- * in struct cvg_nbuf_cb{}, to which skb->cb is typecast.
- * This value is actually the value programmed in CE descriptor.
- *
- * Return: Value of data_attr
- */
-static inline
-uint32_t cdf_nbuf_data_attr_get(cdf_nbuf_t buf)
-{
- return __cdf_nbuf_data_attr_get(buf);
-}
-
-/**
- * cdf_nbuf_data_attr_set() - Sets data_attr field in cvg_nbuf_cb
- *
- * @nbuf: Network buffer (skb on linux)
- * @data_attr: Value to be stored cvg_nbuf_cb->data_attr
- *
- * This function stores the value to be programmed in CE
- * descriptor as part skb->cb which is typecast to struct cvg_nbuf_cb{}
- *
- * Return: void
- */
-static inline
-void cdf_nbuf_data_attr_set(cdf_nbuf_t buf, uint32_t data_attr)
-{
- __cdf_nbuf_data_attr_set(buf, data_attr);
-}
-
-/**
- * cdf_nbuf_tx_info_get() - Parse skb and get Tx metadata
- *
- * @nbuf: Network buffer (skb on linux)
- *
- * This function parses the payload to figure out relevant
- * Tx meta-data e.g. whether to enable tx_classify bit
- * in CE.
- *
- * Return: void
- */
-#define cdf_nbuf_tx_info_get __cdf_nbuf_tx_info_get
-
-void cdf_nbuf_set_state(cdf_nbuf_t nbuf, uint8_t current_state);
-void cdf_nbuf_tx_desc_count_display(void);
-void cdf_nbuf_tx_desc_count_clear(void);
-
-#endif
diff --git a/core/cdf/src/cdf_nbuf.c b/core/cdf/src/cdf_nbuf.c
deleted file mode 100644
index 2968d9d..0000000
--- a/core/cdf/src/cdf_nbuf.c
+++ /dev/null
@@ -1,1040 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: cdf_nbuf.c
- *
- * Connectivity driver framework(CDF) network buffer management APIs
- */
-
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/skbuff.h>
-#include <linux/module.h>
-#if defined(FEATURE_TSO)
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#endif /* FEATURE_TSO */
-#include <qdf_types.h>
-#include <cdf_nbuf.h>
-#include <qdf_mem.h>
-#include <qdf_trace.h>
-#include <qdf_status.h>
-#include <qdf_lock.h>
-
-/* Packet Counter */
-static uint32_t nbuf_tx_mgmt[NBUF_TX_PKT_STATE_MAX];
-static uint32_t nbuf_tx_data[NBUF_TX_PKT_STATE_MAX];
-
-/**
- * cdf_nbuf_tx_desc_count_display() - Displays the packet counter
- *
- * Return: none
- */
-void cdf_nbuf_tx_desc_count_display(void)
-{
- qdf_print("Current Snapshot of the Driver:\n");
- qdf_print("Data Packets:\n");
- qdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
- nbuf_tx_data[NBUF_TX_PKT_HDD] -
- (nbuf_tx_data[NBUF_TX_PKT_TXRX] +
- nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
- nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE]),
- nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
- nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE],
- nbuf_tx_data[NBUF_TX_PKT_TXRX] - nbuf_tx_data[NBUF_TX_PKT_HTT],
- nbuf_tx_data[NBUF_TX_PKT_HTT] - nbuf_tx_data[NBUF_TX_PKT_HTC]);
- qdf_print(" HTC %d HIF %d CE %d TX_COMP %d\n",
- nbuf_tx_data[NBUF_TX_PKT_HTC] - nbuf_tx_data[NBUF_TX_PKT_HIF],
- nbuf_tx_data[NBUF_TX_PKT_HIF] - nbuf_tx_data[NBUF_TX_PKT_CE],
- nbuf_tx_data[NBUF_TX_PKT_CE] - nbuf_tx_data[NBUF_TX_PKT_FREE],
- nbuf_tx_data[NBUF_TX_PKT_FREE]);
- qdf_print("Mgmt Packets:\n");
- qdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
- nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_ENQUEUE] -
- nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_DEQUEUE],
- nbuf_tx_mgmt[NBUF_TX_PKT_TXRX] - nbuf_tx_mgmt[NBUF_TX_PKT_HTT],
- nbuf_tx_mgmt[NBUF_TX_PKT_HTT] - nbuf_tx_mgmt[NBUF_TX_PKT_HTC],
- nbuf_tx_mgmt[NBUF_TX_PKT_HTC] - nbuf_tx_mgmt[NBUF_TX_PKT_HIF],
- nbuf_tx_mgmt[NBUF_TX_PKT_HIF] - nbuf_tx_mgmt[NBUF_TX_PKT_CE],
- nbuf_tx_mgmt[NBUF_TX_PKT_CE] - nbuf_tx_mgmt[NBUF_TX_PKT_FREE],
- nbuf_tx_mgmt[NBUF_TX_PKT_FREE]);
-}
-
-/**
- * cdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
- * @packet_type : packet type either mgmt/data
- * @current_state : layer at which the packet currently present
- *
- * Return: none
- */
-static inline void cdf_nbuf_tx_desc_count_update(uint8_t packet_type,
- uint8_t current_state)
-{
- switch (packet_type) {
- case NBUF_TX_PKT_MGMT_TRACK:
- nbuf_tx_mgmt[current_state]++;
- break;
- case NBUF_TX_PKT_DATA_TRACK:
- nbuf_tx_data[current_state]++;
- break;
- default:
- break;
- }
-}
-
-/**
- * cdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
- *
- * Return: none
- */
-void cdf_nbuf_tx_desc_count_clear(void)
-{
- memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
- memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
-}
-
-/**
- * cdf_nbuf_set_state() - Updates the packet state
- * @nbuf: network buffer
- * @current_state : layer at which the packet currently is
- *
- * This function updates the packet state to the layer at which the packet
- * currently is
- *
- * Return: none
- */
-void cdf_nbuf_set_state(cdf_nbuf_t nbuf, uint8_t current_state)
-{
- /*
- * Only Mgmt, Data Packets are tracked. WMI messages
- * such as scan commands are not tracked
- */
- uint8_t packet_type;
- packet_type = NBUF_CB_TX_PACKET_TRACK(nbuf);
-
- if ((packet_type != NBUF_TX_PKT_DATA_TRACK) &&
- (packet_type != NBUF_TX_PKT_MGMT_TRACK)) {
- return;
- }
- NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
- cdf_nbuf_tx_desc_count_update(packet_type,
- current_state);
-}
-
-/* globals do not need to be initialized to NULL/0 */
-cdf_nbuf_trace_update_t trace_update_cb;
-
-/**
- * __cdf_nbuf_alloc() - Allocate nbuf
- * @hdl: Device handle
- * @size: Netbuf requested size
- * @reserve: Reserve
- * @align: Align
- * @prio: Priority
- *
- * This allocates an nbuf aligns if needed and reserves some space in the front,
- * since the reserve is done after alignment the reserve value if being
- * unaligned will result in an unaligned address.
- *
- * Return: nbuf or %NULL if no memory
- */
-struct sk_buff *__cdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
- int align, int prio)
-{
- struct sk_buff *skb;
- unsigned long offset;
-
- if (align)
- size += (align - 1);
-
- skb = dev_alloc_skb(size);
-
- if (!skb) {
- pr_err("ERROR:NBUF alloc failed\n");
- return NULL;
- }
- memset(skb->cb, 0x0, sizeof(skb->cb));
-
- /*
- * The default is for netbuf fragments to be interpreted
- * as wordstreams rather than bytestreams.
- */
- NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
- NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
-
- /*
- * XXX:how about we reserve first then align
- * Align & make sure that the tail & data are adjusted properly
- */
-
- if (align) {
- offset = ((unsigned long)skb->data) % align;
- if (offset)
- skb_reserve(skb, align - offset);
- }
-
- /*
- * NOTE:alloc doesn't take responsibility if reserve unaligns the data
- * pointer
- */
- skb_reserve(skb, reserve);
-
- return skb;
-}
-
-/**
- * __cdf_nbuf_free() - free the nbuf its interrupt safe
- * @skb: Pointer to network buffer
- *
- * Return: none
- */
-void __cdf_nbuf_free(struct sk_buff *skb)
-{
- if (cdf_nbuf_ipa_owned_get(skb))
- /* IPA cleanup function will need to be called here */
- QDF_BUG(1);
- else
- dev_kfree_skb_any(skb);
-}
-
-/**
- * __cdf_nbuf_map() - get the dma map of the nbuf
- * @osdev: OS device
- * @bmap: Bitmap
- * @skb: Pointer to network buffer
- * @dir: Direction
- *
- * Return: QDF_STATUS
- */
-QDF_STATUS
-__cdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
-{
-#ifdef CDF_OS_DEBUG
- struct skb_shared_info *sh = skb_shinfo(skb);
-#endif
- qdf_assert((dir == QDF_DMA_TO_DEVICE)
- || (dir == QDF_DMA_FROM_DEVICE));
-
- /*
- * Assume there's only a single fragment.
- * To support multiple fragments, it would be necessary to change
- * cdf_nbuf_t to be a separate object that stores meta-info
- * (including the bus address for each fragment) and a pointer
- * to the underlying sk_buff.
- */
- qdf_assert(sh->nr_frags == 0);
-
- return __cdf_nbuf_map_single(osdev, skb, dir);
-
- return QDF_STATUS_SUCCESS;
-}
-
-/**
- * __cdf_nbuf_unmap() - to unmap a previously mapped buf
- * @osdev: OS device
- * @skb: Pointer to network buffer
- * @dir: Direction
- *
- * Return: none
- */
-void
-__cdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
-{
- qdf_assert((dir == QDF_DMA_TO_DEVICE)
- || (dir == QDF_DMA_FROM_DEVICE));
-
- qdf_assert(((dir == QDF_DMA_TO_DEVICE)
- || (dir == QDF_DMA_FROM_DEVICE)));
- /*
- * Assume there's a single fragment.
- * If this is not true, the assertion in __cdf_nbuf_map will catch it.
- */
- __cdf_nbuf_unmap_single(osdev, skb, dir);
-}
-
-/**
- * __cdf_nbuf_map_single() - dma map of the nbuf
- * @osdev: OS device
- * @skb: Pointer to network buffer
- * @dir: Direction
- *
- * Return: QDF_STATUS
- */
-QDF_STATUS
-__cdf_nbuf_map_single(qdf_device_t osdev, cdf_nbuf_t buf, qdf_dma_dir_t dir)
-{
- qdf_dma_addr_t paddr;
-
-/* tempory hack for simulation */
-#ifdef A_SIMOS_DEVHOST
- NBUF_CB_PADDR(buf) = paddr = buf->data;
- return QDF_STATUS_SUCCESS;
-#else
- /* assume that the OS only provides a single fragment */
- NBUF_CB_PADDR(buf) = paddr =
- dma_map_single(osdev->dev, buf->data,
- skb_end_pointer(buf) - buf->data, dir);
- return dma_mapping_error(osdev->dev, paddr)
- ? QDF_STATUS_E_FAILURE
- : QDF_STATUS_SUCCESS;
-#endif /* #ifdef A_SIMOS_DEVHOST */
-}
-
-/**
- * __cdf_nbuf_unmap_single() - dma unmap nbuf
- * @osdev: OS device
- * @skb: Pointer to network buffer
- * @dir: Direction
- *
- * Return: none
- */
-void
-__cdf_nbuf_unmap_single(qdf_device_t osdev, cdf_nbuf_t buf, qdf_dma_dir_t dir)
-{
-#if !defined(A_SIMOS_DEVHOST)
- dma_unmap_single(osdev->dev, NBUF_CB_PADDR(buf),
- skb_end_pointer(buf) - buf->data, dir);
-#endif /* #if !defined(A_SIMOS_DEVHOST) */
-}
-
-/**
- * __cdf_nbuf_set_rx_cksum() - set rx checksum
- * @skb: Pointer to network buffer
- * @cksum: Pointer to checksum value
- *
- * Return: QDF_STATUS
- */
-QDF_STATUS
-__cdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
-{
- switch (cksum->l4_result) {
- case QDF_NBUF_RX_CKSUM_NONE:
- skb->ip_summed = CHECKSUM_NONE;
- break;
- case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum = cksum->val;
- break;
- default:
- pr_err("ADF_NET:Unknown checksum type\n");
- qdf_assert(0);
- return QDF_STATUS_E_NOSUPPORT;
- }
- return QDF_STATUS_SUCCESS;
-}
-
-/**
- * __cdf_nbuf_get_tx_cksum() - get tx checksum
- * @skb: Pointer to network buffer
- *
- * Return: TX checksum value
- */
-qdf_nbuf_tx_cksum_t __cdf_nbuf_get_tx_cksum(struct sk_buff *skb)
-{
- switch (skb->ip_summed) {
- case CHECKSUM_NONE:
- return QDF_NBUF_TX_CKSUM_NONE;
- case CHECKSUM_PARTIAL:
- /* XXX ADF and Linux checksum don't map with 1-to-1. This is
- * not 100% correct */
- return QDF_NBUF_TX_CKSUM_TCP_UDP;
- case CHECKSUM_COMPLETE:
- return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
- default:
- return QDF_NBUF_TX_CKSUM_NONE;
- }
-}
-
-/**
- * __cdf_nbuf_get_tid() - get tid
- * @skb: Pointer to network buffer
- *
- * Return: tid
- */
-uint8_t __cdf_nbuf_get_tid(struct sk_buff *skb)
-{
- return skb->priority;
-}
-
-/**
- * __cdf_nbuf_set_tid() - set tid
- * @skb: Pointer to network buffer
- *
- * Return: none
- */
-void __cdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
-{
- skb->priority = tid;
-}
-
-/**
- * __cdf_nbuf_set_tid() - set tid
- * @skb: Pointer to network buffer
- *
- * Return: none
- */
-uint8_t __cdf_nbuf_get_exemption_type(struct sk_buff *skb)
-{
- return QDF_NBUF_EXEMPT_NO_EXEMPTION;
-}
-
-/**
- * __cdf_nbuf_reg_trace_cb() - register trace callback
- * @cb_func_ptr: Pointer to trace callback function
- *
- * Return: none
- */
-void __cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr)
-{
- trace_update_cb = cb_func_ptr;
- return;
-}
-
-#ifdef QCA_PKT_PROTO_TRACE
-/**
- * __cdf_nbuf_trace_update() - update trace event
- * @skb: Pointer to network buffer
- * @event_string: Pointer to trace callback function
- *
- * Return: none
- */
-void __cdf_nbuf_trace_update(struct sk_buff *buf, char *event_string)
-{
- char string_buf[NBUF_PKT_TRAC_MAX_STRING];
-
- if ((!trace_update_cb) || (!event_string))
- return;
-
- if (!cdf_nbuf_trace_get_proto_type(buf))
- return;
-
- /* Buffer over flow */
- if (NBUF_PKT_TRAC_MAX_STRING <=
- (cdf_str_len(event_string) + NBUF_PKT_TRAC_PROTO_STRING)) {
- return;
- }
-
- qdf_mem_zero(string_buf, NBUF_PKT_TRAC_MAX_STRING);
- qdf_mem_copy(string_buf, event_string, cdf_str_len(event_string));
- if (NBUF_PKT_TRAC_TYPE_EAPOL & cdf_nbuf_trace_get_proto_type(buf)) {
- qdf_mem_copy(string_buf + cdf_str_len(event_string),
- "EPL", NBUF_PKT_TRAC_PROTO_STRING);
- } else if (NBUF_PKT_TRAC_TYPE_DHCP & cdf_nbuf_trace_get_proto_type(buf)) {
- qdf_mem_copy(string_buf + cdf_str_len(event_string),
- "DHC", NBUF_PKT_TRAC_PROTO_STRING);
- } else if (NBUF_PKT_TRAC_TYPE_MGMT_ACTION &
- cdf_nbuf_trace_get_proto_type(buf)) {
- qdf_mem_copy(string_buf + cdf_str_len(event_string),
- "MACT", NBUF_PKT_TRAC_PROTO_STRING);
- }
-
- trace_update_cb(string_buf);
- return;
-}
-#endif /* QCA_PKT_PROTO_TRACE */
-
-#ifdef MEMORY_DEBUG
-#define CDF_NET_BUF_TRACK_MAX_SIZE (1024)
-
-/**
- * struct cdf_nbuf_track_t - Network buffer track structure
- *
- * @p_next: Pointer to next
- * @net_buf: Pointer to network buffer
- * @file_name: File name
- * @line_num: Line number
- * @size: Size
- */
-struct cdf_nbuf_track_t {
- struct cdf_nbuf_track_t *p_next;
- cdf_nbuf_t net_buf;
- uint8_t *file_name;
- uint32_t line_num;
- size_t size;
-};
-
-spinlock_t g_cdf_net_buf_track_lock;
-typedef struct cdf_nbuf_track_t CDF_NBUF_TRACK;
-
-CDF_NBUF_TRACK *gp_cdf_net_buf_track_tbl[CDF_NET_BUF_TRACK_MAX_SIZE];
-
-/**
- * cdf_net_buf_debug_init() - initialize network buffer debug functionality
- *
- * CDF network buffer debug feature tracks all SKBs allocated by WLAN driver
- * in a hash table and when driver is unloaded it reports about leaked SKBs.
- * WLAN driver module whose allocated SKB is freed by network stack are
- * suppose to call cdf_net_buf_debug_release_skb() such that the SKB is not
- * reported as memory leak.
- *
- * Return: none
- */
-void cdf_net_buf_debug_init(void)
-{
- uint32_t i;
- unsigned long irq_flag;
-
- spin_lock_init(&g_cdf_net_buf_track_lock);
-
- spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
- for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++)
- gp_cdf_net_buf_track_tbl[i] = NULL;
-
- spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
- return;
-}
-
-/**
- * cdf_net_buf_debug_init() - exit network buffer debug functionality
- *
- * Exit network buffer tracking debug functionality and log SKB memory leaks
- *
- * Return: none
- */
-void cdf_net_buf_debug_exit(void)
-{
- uint32_t i;
- unsigned long irq_flag;
- CDF_NBUF_TRACK *p_node;
- CDF_NBUF_TRACK *p_prev;
-
- spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
- for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
- p_node = gp_cdf_net_buf_track_tbl[i];
- while (p_node) {
- p_prev = p_node;
- p_node = p_node->p_next;
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
- "SKB buf memory Leak@ File %s, @Line %d, size %zu\n",
- p_prev->file_name, p_prev->line_num,
- p_prev->size);
- }
- }
-
- spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
- return;
-}
-
-/**
- * cdf_net_buf_debug_clean() - clean up network buffer debug functionality
- *
- * Return: none
- */
-void cdf_net_buf_debug_clean(void)
-{
- uint32_t i;
- unsigned long irq_flag;
- CDF_NBUF_TRACK *p_node;
- CDF_NBUF_TRACK *p_prev;
-
- spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
- for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
- p_node = gp_cdf_net_buf_track_tbl[i];
- while (p_node) {
- p_prev = p_node;
- p_node = p_node->p_next;
- qdf_mem_free(p_prev);
- }
- }
-
- spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
- return;
-}
-
-/**
- * cdf_net_buf_debug_hash() - hash network buffer pointer
- *
- * Return: hash value
- */
-uint32_t cdf_net_buf_debug_hash(cdf_nbuf_t net_buf)
-{
- uint32_t i;
-
- i = (uint32_t) ((uintptr_t) net_buf & (CDF_NET_BUF_TRACK_MAX_SIZE - 1));
-
- return i;
-}
-
-/**
- * cdf_net_buf_debug_look_up() - look up network buffer in debug hash table
- *
- * Return: If skb is found in hash table then return pointer to network buffer
- * else return %NULL
- */
-CDF_NBUF_TRACK *cdf_net_buf_debug_look_up(cdf_nbuf_t net_buf)
-{
- uint32_t i;
- CDF_NBUF_TRACK *p_node;
-
- i = cdf_net_buf_debug_hash(net_buf);
- p_node = gp_cdf_net_buf_track_tbl[i];
-
- while (p_node) {
- if (p_node->net_buf == net_buf)
- return p_node;
- p_node = p_node->p_next;
- }
-
- return NULL;
-
-}
-
-/**
- * cdf_net_buf_debug_add_node() - store skb in debug hash table
- *
- * Return: none
- */
-void cdf_net_buf_debug_add_node(cdf_nbuf_t net_buf, size_t size,
- uint8_t *file_name, uint32_t line_num)
-{
- uint32_t i;
- unsigned long irq_flag;
- CDF_NBUF_TRACK *p_node;
-
- spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
- i = cdf_net_buf_debug_hash(net_buf);
- p_node = cdf_net_buf_debug_look_up(net_buf);
-
- if (p_node) {
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
- "Double allocation of skb ! Already allocated from %s %d",
- p_node->file_name, p_node->line_num);
- QDF_ASSERT(0);
- goto done;
- } else {
- p_node = (CDF_NBUF_TRACK *) qdf_mem_malloc(sizeof(*p_node));
- if (p_node) {
- p_node->net_buf = net_buf;
- p_node->file_name = file_name;
- p_node->line_num = line_num;
- p_node->size = size;
- p_node->p_next = gp_cdf_net_buf_track_tbl[i];
- gp_cdf_net_buf_track_tbl[i] = p_node;
- } else {
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
- "Mem alloc failed ! Could not track skb from %s %d of size %zu",
- file_name, line_num, size);
- QDF_ASSERT(0);
- }
- }
-
-done:
- spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
- return;
-}
-
-/**
- * cdf_net_buf_debug_delete_node() - remove skb from debug hash table
- *
- * Return: none
- */
-void cdf_net_buf_debug_delete_node(cdf_nbuf_t net_buf)
-{
- uint32_t i;
- bool found = false;
- CDF_NBUF_TRACK *p_head;
- CDF_NBUF_TRACK *p_node;
- unsigned long irq_flag;
- CDF_NBUF_TRACK *p_prev;
-
- spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
- i = cdf_net_buf_debug_hash(net_buf);
- p_head = gp_cdf_net_buf_track_tbl[i];
-
- /* Unallocated SKB */
- if (!p_head)
- goto done;
-
- p_node = p_head;
- /* Found at head of the table */
- if (p_head->net_buf == net_buf) {
- gp_cdf_net_buf_track_tbl[i] = p_node->p_next;
- qdf_mem_free((void *)p_node);
- found = true;
- goto done;
- }
-
- /* Search in collision list */
- while (p_node) {
- p_prev = p_node;
- p_node = p_node->p_next;
- if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
- p_prev->p_next = p_node->p_next;
- qdf_mem_free((void *)p_node);
- found = true;
- break;
- }
- }
-
-done:
- if (!found) {
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
- "Unallocated buffer ! Double free of net_buf %p ?",
- net_buf);
- QDF_ASSERT(0);
- }
-
- spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
- return;
-}
-
-/**
- * cdf_net_buf_debug_release_skb() - release skb to avoid memory leak
- *
- * WLAN driver module whose allocated SKB is freed by network stack are
- * suppose to call this API before returning SKB to network stack such
- * that the SKB is not reported as memory leak.
- *
- * Return: none
- */
-void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf)
-{
- cdf_net_buf_debug_delete_node(net_buf);
-}
-
-#endif /*MEMORY_DEBUG */
-#if defined(FEATURE_TSO)
-
-struct cdf_tso_cmn_seg_info_t {
- uint16_t ethproto;
- uint16_t ip_tcp_hdr_len;
- uint16_t l2_len;
- unsigned char *eit_hdr;
- unsigned int eit_hdr_len;
- struct tcphdr *tcphdr;
- uint16_t ipv4_csum_en;
- uint16_t tcp_ipv4_csum_en;
- uint16_t tcp_ipv6_csum_en;
- uint16_t ip_id;
- uint32_t tcp_seq_num;
-};
-
-/**
- * __cdf_nbuf_get_tso_cmn_seg_info() - get TSO common
- * information
- *
- * Get the TSO information that is common across all the TCP
- * segments of the jumbo packet
- *
- * Return: 0 - success 1 - failure
- */
-uint8_t __cdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
- struct cdf_tso_cmn_seg_info_t *tso_info)
-{
- /* Get ethernet type and ethernet header length */
- tso_info->ethproto = vlan_get_protocol(skb);
-
- /* Determine whether this is an IPv4 or IPv6 packet */
- if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
- /* for IPv4, get the IP ID and enable TCP and IP csum */
- struct iphdr *ipv4_hdr = ip_hdr(skb);
- tso_info->ip_id = ntohs(ipv4_hdr->id);
- tso_info->ipv4_csum_en = 1;
- tso_info->tcp_ipv4_csum_en = 1;
- if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
- qdf_print("TSO IPV4 proto 0x%x not TCP\n",
- ipv4_hdr->protocol);
- return 1;
- }
- } else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
- /* for IPv6, enable TCP csum. No IP ID or IP csum */
- tso_info->tcp_ipv6_csum_en = 1;
- } else {
- qdf_print("TSO: ethertype 0x%x is not supported!\n",
- tso_info->ethproto);
- return 1;
- }
-
- tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
- tso_info->tcphdr = tcp_hdr(skb);
- tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
- /* get pointer to the ethernet + IP + TCP header and their length */
- tso_info->eit_hdr = skb->data;
- tso_info->eit_hdr_len = (skb_transport_header(skb)
- - skb_mac_header(skb)) + tcp_hdrlen(skb);
- tso_info->ip_tcp_hdr_len = tso_info->eit_hdr_len - tso_info->l2_len;
- return 0;
-}
-
-/**
- * cdf_dmaaddr_to_32s - return high and low parts of dma_addr
- *
- * Returns the high and low 32-bits of the DMA addr in the provided ptrs
- *
- * Return: N/A
-*/
-static inline void cdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
- uint32_t *lo, uint32_t *hi)
-{
- if (sizeof(dmaaddr) > sizeof(uint32_t)) {
- *lo = (uint32_t) (dmaaddr & 0x0ffffffff);
- *hi = (uint32_t) (dmaaddr >> 32);
- } else {
- *lo = dmaaddr;
- *hi = 0;
- }
-}
-/**
- * __cdf_nbuf_get_tso_info() - function to divide a TSO nbuf
- * into segments
- * @nbuf: network buffer to be segmented
- * @tso_info: This is the output. The information about the
- * TSO segments will be populated within this.
- *
- * This function fragments a TCP jumbo packet into smaller
- * segments to be transmitted by the driver. It chains the TSO
- * segments created into a list.
- *
- * Return: number of TSO segments
- */
-uint32_t __cdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
- struct qdf_tso_info_t *tso_info)
-{
- /* common accross all segments */
- struct cdf_tso_cmn_seg_info_t tso_cmn_info;
-
- /* segment specific */
- char *tso_frag_vaddr;
- qdf_dma_addr_t tso_frag_paddr = 0;
- uint32_t tso_frag_paddr_lo, tso_frag_paddr_hi;
- uint32_t num_seg = 0;
- struct cdf_tso_seg_elem_t *curr_seg;
- const struct skb_frag_struct *frag = NULL;
- uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
- uint32_t skb_frag_len = 0; /* skb's fragment length (continous memory)*/
- uint32_t foffset = 0; /* offset into the skb's fragment */
- uint32_t skb_proc = 0; /* bytes of the skb that have been processed*/
- uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
-
- memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
-
- if (qdf_unlikely(__cdf_nbuf_get_tso_cmn_seg_info(skb, &tso_cmn_info))) {
- qdf_print("TSO: error getting common segment info\n");
- return 0;
- }
- curr_seg = tso_info->tso_seg_list;
-
- /* length of the first chunk of data in the skb */
- skb_proc = skb_frag_len = skb_headlen(skb);
-
- /* the 0th tso segment's 0th fragment always contains the EIT header */
- /* update the remaining skb fragment length and TSO segment length */
- skb_frag_len -= tso_cmn_info.eit_hdr_len;
- skb_proc -= tso_cmn_info.eit_hdr_len;
-
- /* get the address to the next tso fragment */
- tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
- /* get the length of the next tso fragment */
- tso_frag_len = min(skb_frag_len, tso_seg_size);
- tso_frag_paddr = dma_map_single(osdev->dev,
- tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
- cdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo, &tso_frag_paddr_hi);
-
- num_seg = tso_info->num_segs;
- tso_info->num_segs = 0;
- tso_info->is_tso = 1;
-
- while (num_seg && curr_seg) {
- int i = 1; /* tso fragment index */
- int j = 0; /* skb fragment index */
- uint8_t more_tso_frags = 1;
- uint8_t from_frag_table = 0;
-
- /* Initialize the flags to 0 */
- memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
- tso_info->num_segs++;
-
- /* The following fields remain the same across all segments of
- a jumbo packet */
- curr_seg->seg.tso_flags.tso_enable = 1;
- curr_seg->seg.tso_flags.partial_checksum_en = 0;
- curr_seg->seg.tso_flags.ipv4_checksum_en =
- tso_cmn_info.ipv4_csum_en;
- curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
- tso_cmn_info.tcp_ipv6_csum_en;
- curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
- tso_cmn_info.tcp_ipv4_csum_en;
- curr_seg->seg.tso_flags.l2_len = 0;
- curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
- curr_seg->seg.num_frags = 0;
-
- /* The following fields change for the segments */
- curr_seg->seg.tso_flags.ip_id = tso_cmn_info.ip_id;
- tso_cmn_info.ip_id++;
-
- curr_seg->seg.tso_flags.syn = tso_cmn_info.tcphdr->syn;
- curr_seg->seg.tso_flags.rst = tso_cmn_info.tcphdr->rst;
- curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
- curr_seg->seg.tso_flags.ack = tso_cmn_info.tcphdr->ack;
- curr_seg->seg.tso_flags.urg = tso_cmn_info.tcphdr->urg;
- curr_seg->seg.tso_flags.ece = tso_cmn_info.tcphdr->ece;
- curr_seg->seg.tso_flags.cwr = tso_cmn_info.tcphdr->cwr;
-
- curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info.tcp_seq_num;
-
- /* First fragment for each segment always contains the ethernet,
- IP and TCP header */
- curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info.eit_hdr;
- curr_seg->seg.tso_frags[0].length = tso_cmn_info.eit_hdr_len;
- tso_info->total_len = curr_seg->seg.tso_frags[0].length;
- {
- qdf_dma_addr_t mapped;
- uint32_t lo, hi;
-
- mapped = dma_map_single(osdev->dev, tso_cmn_info.eit_hdr,
- tso_cmn_info.eit_hdr_len, DMA_TO_DEVICE);
- cdf_dmaaddr_to_32s(mapped, &lo, &hi);
- curr_seg->seg.tso_frags[0].paddr_low_32 = lo;
- curr_seg->seg.tso_frags[0].paddr_upper_16 = (hi & 0xffff);
- }
- curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
- curr_seg->seg.num_frags++;
-
- while (more_tso_frags) {
- curr_seg->seg.tso_frags[i].vaddr = tso_frag_vaddr;
- curr_seg->seg.tso_frags[i].length = tso_frag_len;
- tso_info->total_len +=
- curr_seg->seg.tso_frags[i].length;
- curr_seg->seg.tso_flags.ip_len +=
- curr_seg->seg.tso_frags[i].length;
- curr_seg->seg.num_frags++;
- skb_proc = skb_proc - curr_seg->seg.tso_frags[i].length;
-
- /* increment the TCP sequence number */
- tso_cmn_info.tcp_seq_num += tso_frag_len;
- curr_seg->seg.tso_frags[i].paddr_upper_16 =
- (tso_frag_paddr_hi & 0xffff);
- curr_seg->seg.tso_frags[i].paddr_low_32 =
- tso_frag_paddr_lo;
-
- /* if there is no more data left in the skb */
- if (!skb_proc)
- return tso_info->num_segs;
-
- /* get the next payload fragment information */
- /* check if there are more fragments in this segment */
- if ((tso_seg_size - tso_frag_len)) {
- more_tso_frags = 1;
- i++;
- } else {
- more_tso_frags = 0;
- /* reset i and the tso payload size */
- i = 1;
- tso_seg_size = skb_shinfo(skb)->gso_size;
- }
-
- /* if the next fragment is contiguous */
- if (tso_frag_len < skb_frag_len) {
- skb_frag_len = skb_frag_len - tso_frag_len;
- tso_frag_len = min(skb_frag_len, tso_seg_size);
- tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
- if (from_frag_table) {
- tso_frag_paddr =
- skb_frag_dma_map(osdev->dev,
- frag, foffset,
- tso_frag_len,
- DMA_TO_DEVICE);
- cdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo, &tso_frag_paddr_hi);
- } else {
- tso_frag_paddr =
- dma_map_single(osdev->dev,
- tso_frag_vaddr,
- tso_frag_len,
- DMA_TO_DEVICE);
- cdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo, &tso_frag_paddr_hi);
- }
- } else { /* the next fragment is not contiguous */
- tso_frag_len = min(skb_frag_len, tso_seg_size);
- frag = &skb_shinfo(skb)->frags[j];
- skb_frag_len = skb_frag_size(frag);
-
- tso_frag_vaddr = skb_frag_address(frag);
- tso_frag_paddr = skb_frag_dma_map(osdev->dev,
- frag, 0, tso_frag_len,
- DMA_TO_DEVICE);
- cdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo, &tso_frag_paddr_hi);
- foffset += tso_frag_len;
- from_frag_table = 1;
- j++;
- }
- }
- num_seg--;
- /* if TCP FIN flag was set, set it in the last segment */
- if (!num_seg)
- curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
-
- curr_seg = curr_seg->next;
- }
- return tso_info->num_segs;
-}
-
-/**
- * __cdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
- * into segments
- * @nbuf: network buffer to be segmented
- * @tso_info: This is the output. The information about the
- * TSO segments will be populated within this.
- *
- * This function fragments a TCP jumbo packet into smaller
- * segments to be transmitted by the driver. It chains the TSO
- * segments created into a list.
- *
- * Return: 0 - success, 1 - failure
- */
-uint32_t __cdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
-{
- uint32_t gso_size, tmp_len, num_segs = 0;
-
- gso_size = skb_shinfo(skb)->gso_size;
- tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
- + tcp_hdrlen(skb));
- while (tmp_len) {
- num_segs++;
- if (tmp_len > gso_size)
- tmp_len -= gso_size;
- else
- break;
- }
- return num_segs;
-}
-
-#endif /* FEATURE_TSO */
diff --git a/core/cdf/src/i_cdf_nbuf.h b/core/cdf/src/i_cdf_nbuf.h
deleted file mode 100644
index 160a5ff..0000000
--- a/core/cdf/src/i_cdf_nbuf.h
+++ /dev/null
@@ -1,1056 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: i_cdf_nbuf.h
- *
- * Linux implementation of skbuf
- */
-#ifndef _I_CDF_NET_BUF_H
-#define _I_CDF_NET_BUF_H
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/dma-mapping.h>
-#include <qdf_types.h>
-#include <qdf_status.h>
-
-/*
- * Use socket buffer as the underlying implentation as skbuf .
- * Linux use sk_buff to represent both packet and data,
- * so we use sk_buffer to represent both skbuf .
- */
-typedef struct sk_buff *__cdf_nbuf_t;
-
-/* NBUFCB_TX_MAX_OS_FRAGS
- * max tx fragments provided by the OS
- */
-#define NBUF_CB_TX_MAX_OS_FRAGS 1
-
-/* NBUF_CB_TX_MAX_EXTRA_FRAGS -
- * max tx fragments added by the driver
- * The driver will always add one tx fragment (the tx descriptor)
- */
-#define NBUF_CB_TX_MAX_EXTRA_FRAGS 2
-
-/*
- * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
- */
-typedef union {
- uint64_t u64;
- qdf_dma_addr_t dma_addr;
-} cdf_paddr_t;
-
-/**
- * struct cdf_nbuf_cb - network buffer control block contents (skb->cb)
- * - data passed between layers of the driver.
- *
- * Notes:
- * 1. Hard limited to 48 bytes. Please count your bytes
- * 2. The size of this structure has to be easily calculatable and
- * consistently so: do not use any conditional compile flags
- * 3. Split into a common part followed by a tx/rx overlay
- * 4. There is only one extra frag, which represents the HTC/HTT header
- *
- * @common.paddr : physical addressed retrived by dma_map of nbuf->data
- * @rx.lro_flags : hardware assisted flags:
- * @rx.lro_eligible : flag to indicate whether the MSDU is LRO eligible
- * @rx.tcp_proto : L4 protocol is TCP
- * @rx.tcp_pure_ack : A TCP ACK packet with no payload
- * @rx.ipv6_proto : L3 protocol is IPV6
- * @rx.ip_offset : offset to IP header
- * @rx.tcp_offset : offset to TCP header
- * @rx.tcp_udp_chksum : L4 payload checksum
- * @rx.tcp_seq_num : TCP sequence number
- * @rx.tcp_ack_num : TCP ACK number
- * @rx.flow_id_toeplitz: 32-bit 5-tuple Toeplitz hash
- * @tx.extra_frag : represent HTC/HTT header
- * @tx.efrag.vaddr : virtual address of ~
- * @tx.efrag.paddr : physical/DMA address of ~
- * @tx.efrag.len : length of efrag pointed by the above pointers
- * @tx.efrag.num : number of extra frags ( 0 or 1)
- * @tx.efrag.flags.nbuf : flag, nbuf payload to be swapped (wordstream)
- * @tx.efrag.flags.efrag : flag, efrag payload to be swapped (wordstream)
- * @tx.efrag.flags.chfrag_start: used by WIN
- * @tx.efrags.flags.chfrag_end: used by WIN
- * @tx.data_attr : value that is programmed into CE descr, includes:
- * + (1) CE classification enablement bit
- * + (2) packet type (802.3 or Ethernet type II)
- * + (3) packet offset (usually length of HTC/HTT descr)
- * @tx.trace : combined structure for DP and protocol trace
- * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
- * + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
- * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
- * @tx.trace.proto_type : bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
- * + (MGMT_ACTION)] - 4 bits
- * @tx.trace.dp_trace : flag (Datapath trace)
- * @tx.trace.htt2_frm : flag (high-latency path only)
- * @tx.trace.vdev_id : vdev (for protocol trace)
- * @tx.ipa.owned : packet owned by IPA
- * @tx.ipa.priv : private data, used by IPA
- */
-struct cdf_nbuf_cb {
- /* common */
- cdf_paddr_t paddr; /* of skb->data */
- /* valid only in one direction */
- union {
- /* Note: MAX: 40 bytes */
- struct {
- uint32_t lro_eligible:1,
- tcp_proto:1,
- tcp_pure_ack:1,
- ipv6_proto:1,
- ip_offset:7,
- tcp_offset:7;
- uint32_t tcp_udp_chksum:16,
- tcp_win:16;
- uint32_t tcp_seq_num;
- uint32_t tcp_ack_num;
- uint32_t flow_id_toeplitz;
- } rx; /* 20 bytes */
-
- /* Note: MAX: 40 bytes */
- struct {
- struct {
- unsigned char *vaddr;
- cdf_paddr_t paddr;
- uint16_t len;
- uint8_t num; /* 0: cmn.addr; 1: tx.efrag */
- union {
- struct {
- uint8_t flag_efrag:1,
- flag_nbuf:1,
- /* following for WIN */
- flag_chfrag_start:1,
- flag_chfrag_end:1,
- reserved:4;
- } bits;
- uint8_t u8;
- } flags;
- } extra_frag; /* 20 bytes */
- uint32_t data_attr; /* 4 bytes */
- union {
- struct {
- uint8_t packet_state;
- uint8_t packet_track:4,
- proto_type:4;
- uint8_t dp_trace:1,
- htt2_frm:1,
- rsrvd:6;
- uint8_t vdev_id;
- } hl;
- struct {
- uint8_t packet_state;
- uint8_t packet_track:4,
- proto_type:4;
- uint8_t dp_trace:1,
- rsrvd:7;
- uint8_t vdev_id;
- } ll; /* low latency */
- } trace; /* 4 bytes */
- struct {
- uint32_t owned:1,
- priv:31;
- } ipa; /* 4 */
- } tx; /* 32 bytes */
- } u;
-}; /* struct cdf_nbuf_cb: MAX 48 bytes */
-
-/**
- * access macros to cdf_nbuf_cb
- * Note: These macros can be used as L-values as well as R-values.
- * When used as R-values, they effectively function as "get" macros
- * When used as L_values, they effectively function as "set" macros
- */
-#define NBUF_CB_PADDR(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
-#define NBUF_CB_RX_LRO_ELIGIBLE(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
-#define NBUF_CB_RX_TCP_PROTO(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
-#define NBUF_CB_RX_TCP_PURE_ACK(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
-#define NBUF_CB_RX_IPV6_PROTO(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
-#define NBUF_CB_RX_IP_OFFSET(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
-#define NBUF_CB_RX_TCP_OFFSET(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
-#define NBUF_CB_RX_TCP_CHKSUM(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
-#define NBUF_CB_RX_TCP_OFFSET(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
-#define NBUF_CB_RX_TCP_WIN(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
-#define NBUF_CB_RX_TCP_SEQ_NUM(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_seq_num)
-#define NBUF_CB_RX_TCP_ACK_NUM(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_ack_num)
-#define NBUF_CB_RX_FLOW_ID_TOEPLITZ(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id_toeplitz)
-
-#define NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.vaddr)
-#define NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.paddr.dma_addr)
-#define NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.len)
-#define NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.num)
-#define NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.u8)
-#define NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.bits.flag_efrag)
-#define NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.bits.flag_nbuf)
-#define NBUF_CB_TX_DATA_ATTR(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.data_attr)
-#define NBUF_CB_TX_PACKET_STATE(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.packet_state)
-#define NBUF_CB_TX_PACKET_TRACK(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.packet_track)
-#define NBUF_CB_TX_PROTO_TYPE(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.proto_type)
-#define NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
- cdf_nbuf_set_state(skb, PACKET_STATE)
-#define NBUF_CB_TX_DP_TRACE(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.dp_trace)
-#define NBUF_CB_TX_HL_HTT2_FRM(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.hl.htt2_frm)
-#define NBUF_CB_TX_VDEV_ID(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.vdev_id)
-#define NBUF_CB_TX_IPA_OWNED(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.ipa.owned)
-#define NBUF_CB_TX_IPA_PRIV(skb) \
- (((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.ipa.priv)
-
-#define __cdf_nbuf_get_num_frags(skb) \
- (NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
-
-#if defined(FEATURE_TSO)
-#define __cdf_nbuf_reset_num_frags(skb) \
- (NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
-#endif
-/**
- * end of nbuf->cb access macros
- */
-
-typedef void (*cdf_nbuf_trace_update_t)(char *);
-
-#define __cdf_nbuf_mapped_paddr_get(skb) \
- NBUF_CB_PADDR(skb)
-
-#define __cdf_nbuf_mapped_paddr_set(skb, paddr) \
- (NBUF_CB_PADDR(skb) = (paddr))
-
-#define __cdf_nbuf_frag_push_head( \
- skb, frag_len, frag_vaddr, frag_paddr) \
- do { \
- NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1; \
- NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr; \
- NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr; \
- NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len; \
- } while (0)
-
-#define __cdf_nbuf_get_frag_vaddr(skb, frag_num) \
- ((frag_num < NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
- NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
-
-#define __cdf_nbuf_get_frag_paddr(skb, frag_num) \
- ((frag_num < NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
- NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) : \
- /* assume that the OS only provides a single fragment */ \
- NBUF_CB_PADDR(skb))
-
-#define __cdf_nbuf_get_frag_len(skb, frag_num) \
- ((frag_num < NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
- NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
-
-#define __cdf_nbuf_get_frag_is_wordstream(skb, frag) \
- ((frag_num < NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
- ? (NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb)) \
- : (NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
-
-#define __cdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm) \
- do { \
- if (frag_num >= NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
- frag_num = NBUF_CB_TX_MAX_EXTRA_FRAGS; \
- if (frag_num) \
- NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = is_wstrm; \
- else \
- NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = is_wstrm; \
- } while (0)
-
-#define __cdf_nbuf_trace_set_proto_type(skb, proto_type) \
- (NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
-#define __cdf_nbuf_trace_get_proto_type(skb) \
- NBUF_CB_TX_PROTO_TYPE(skb)
-
-#define __cdf_nbuf_data_attr_get(skb) \
- NBUF_CB_TX_DATA_ATTR(skb)
-#define __cdf_nbuf_data_attr_set(skb, data_attr) \
- (NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
-
-#define __cdf_nbuf_ipa_owned_get(skb) \
- NBUF_CB_TX_IPA_OWNED(skb)
-
-#define __cdf_nbuf_ipa_owned_set(skb) \
- (NBUF_CB_TX_IPA_OWNED(skb) = 1)
-
-#define __cdf_nbuf_ipa_priv_get(skb) \
- NBUF_CB_TX_IPA_PRIV(skb)
-
-#define __cdf_nbuf_ipa_priv_set(skb, priv) \
- (NBUF_CB_TX_IPA_PRIV(skb) = (priv))
-
-/*
- * prototypes. Implemented in cdf_nbuf.c
- */
-__cdf_nbuf_t __cdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve,
- int align, int prio);
-void __cdf_nbuf_free(struct sk_buff *skb);
-QDF_STATUS __cdf_nbuf_map(__qdf_device_t osdev,
- struct sk_buff *skb, qdf_dma_dir_t dir);
-void __cdf_nbuf_unmap(__qdf_device_t osdev,
- struct sk_buff *skb, qdf_dma_dir_t dir);
-QDF_STATUS __cdf_nbuf_map_single(__qdf_device_t osdev,
- struct sk_buff *skb, qdf_dma_dir_t dir);
-void __cdf_nbuf_unmap_single(__qdf_device_t osdev,
- struct sk_buff *skb, qdf_dma_dir_t dir);
-void __cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr);
-
-#ifdef QCA_PKT_PROTO_TRACE
-void __cdf_nbuf_trace_update(struct sk_buff *buf, char *event_string);
-#else
-#define __cdf_nbuf_trace_update(skb, event_string)
-#endif /* QCA_PKT_PROTO_TRACE */
-
-/**
- * __cdf_os_to_status() - OS to CDF status conversion
- * @error : OS error
- *
- * Return: CDF status
- */
-static inline QDF_STATUS __cdf_os_to_status(signed int error)
-{
- switch (error) {
- case 0:
- return QDF_STATUS_SUCCESS;
- case ENOMEM:
- case -ENOMEM:
- return QDF_STATUS_E_NOMEM;
- default:
- return QDF_STATUS_E_NOSUPPORT;
- }
-}
-
-/**
- * __cdf_nbuf_len() - return the amount of valid data in the skb
- * @skb: Pointer to network buffer
- *
- * This API returns the amount of valid data in the skb, If there are frags
- * then it returns total length.
- *
- * Return: network buffer length
- */
-static inline size_t __cdf_nbuf_len(struct sk_buff *skb)
-{
- int i, extra_frag_len = 0;
-
- i = NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
- if (i > 0)
- extra_frag_len = NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
-
- return extra_frag_len + skb->len;
-}
-
-/**
- * __cdf_nbuf_cat() - link two nbufs
- * @dst: Buffer to piggyback into
- * @src: Buffer to put
- *
- * Link tow nbufs the new buf is piggybacked into the older one. The older
- * (src) skb is released.
- *
- * Return: QDF_STATUS (status of the call) if failed the src skb
- * is released
- */
-static inline QDF_STATUS
-__cdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
-{
- QDF_STATUS error = 0;
-
- qdf_assert(dst && src);
-
- /*
- * Since pskb_expand_head unconditionally reallocates the skb->head
- * buffer, first check whether the current buffer is already large
- * enough.
- */
- if (skb_tailroom(dst) < src->len) {
- error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
- if (error)
- return __cdf_os_to_status(error);
- }
- memcpy(skb_tail_pointer(dst), src->data, src->len);
-
- skb_put(dst, src->len);
- dev_kfree_skb_any(src);
-
- return __cdf_os_to_status(error);
-}
-
-/*
- * nbuf manipulation routines
- */
-
-/**
- * __cdf_nbuf_headroom() - return the amount of tail space available
- * @buf: Pointer to network buffer
- *
- * Return: amount of tail room
- */
-static inline int __cdf_nbuf_headroom(struct sk_buff *skb)
-{
- return skb_headroom(skb);
-}
-
-/**
- * __cdf_nbuf_tailroom() - return the amount of tail space available
- * @buf: Pointer to network buffer
- *
- * Return: amount of tail room
- */
-static inline uint32_t __cdf_nbuf_tailroom(struct sk_buff *skb)
-{
- return skb_tailroom(skb);
-}
-
-/**
- * __cdf_nbuf_push_head() - Push data in the front
- * @skb: Pointer to network buffer
- * @size: size to be pushed
- *
- * Return: New data pointer of this buf after data has been pushed,
- * or NULL if there is not enough room in this buf.
- */
-static inline uint8_t *__cdf_nbuf_push_head(struct sk_buff *skb, size_t size)
-{
- if (NBUF_CB_PADDR(skb))
- NBUF_CB_PADDR(skb) -= size;
-
- return skb_push(skb, size);
-}
-
-/**
- * __cdf_nbuf_put_tail() - Puts data in the end
- * @skb: Pointer to network buffer
- * @size: size to be pushed
- *
- * Return: data pointer of this buf where new data has to be
- * put, or NULL if there is not enough room in this buf.
- */
-static inline uint8_t *__cdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
-{
- if (skb_tailroom(skb) < size) {
- if (unlikely(pskb_expand_head(skb, 0,
- size - skb_tailroom(skb), GFP_ATOMIC))) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
- }
- return skb_put(skb, size);
-}
-
-/**
- * __cdf_nbuf_pull_head() - pull data out from the front
- * @skb: Pointer to network buffer
- * @size: size to be popped
- *
- * Return: New data pointer of this buf after data has been popped,
- * or NULL if there is not sufficient data to pull.
- */
-static inline uint8_t *__cdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
-{
- if (NBUF_CB_PADDR(skb))
- NBUF_CB_PADDR(skb) += size;
-
- return skb_pull(skb, size);
-}
-
-/**
- * __cdf_nbuf_trim_tail() - trim data out from the end
- * @skb: Pointer to network buffer
- * @size: size to be popped
- *
- * Return: none
- */
-static inline void __cdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
-{
- return skb_trim(skb, skb->len - size);
-}
-
-
-/*
- * prototypes. Implemented in cdf_nbuf.c
- */
-qdf_nbuf_tx_cksum_t __cdf_nbuf_get_tx_cksum(struct sk_buff *skb);
-QDF_STATUS __cdf_nbuf_set_rx_cksum(struct sk_buff *skb,
- qdf_nbuf_rx_cksum_t *cksum);
-uint8_t __cdf_nbuf_get_tid(struct sk_buff *skb);
-void __cdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
-uint8_t __cdf_nbuf_get_exemption_type(struct sk_buff *skb);
-
-/*
- * cdf_nbuf_pool_delete() implementation - do nothing in linux
- */
-#define __cdf_nbuf_pool_delete(osdev)
-
-/**
- * __cdf_nbuf_clone() - clone the nbuf (copy is readonly)
- * @skb: Pointer to network buffer
- *
- * if GFP_ATOMIC is overkill then we can check whether its
- * called from interrupt context and then do it or else in
- * normal case use GFP_KERNEL
- *
- * example use "in_irq() || irqs_disabled()"
- *
- * Return: cloned skb
- */
-static inline struct sk_buff *__cdf_nbuf_clone(struct sk_buff *skb)
-{
- return skb_clone(skb, GFP_ATOMIC);
-}
-
-/**
- * __cdf_nbuf_copy() - returns a private copy of the skb
- * @skb: Pointer to network buffer
- *
- * This API returns a private copy of the skb, the skb returned is completely
- * modifiable by callers
- *
- * Return: skb or NULL
- */
-static inline struct sk_buff *__cdf_nbuf_copy(struct sk_buff *skb)
-{
- return skb_copy(skb, GFP_ATOMIC);
-}
-
-#define __cdf_nbuf_reserve skb_reserve
-
-/***********************XXX: misc api's************************/
-
-/**
- * __cdf_nbuf_head() - return the pointer the skb's head pointer
- * @skb: Pointer to network buffer
- *
- * Return: Pointer to head buffer
- */
-static inline uint8_t *__cdf_nbuf_head(struct sk_buff *skb)
-{
- return skb->head;
-}
-
-/**
- * __cdf_nbuf_data() - return the pointer to data header in the skb
- * @skb: Pointer to network buffer
- *
- * Return: Pointer to skb data
- */
-static inline uint8_t *__cdf_nbuf_data(struct sk_buff *skb)
-{
- return skb->data;
-}
-
-/**
- * __cdf_nbuf_get_protocol() - return the protocol value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: skb protocol
- */
-static inline uint16_t __cdf_nbuf_get_protocol(struct sk_buff *skb)
-{
- return skb->protocol;
-}
-
-/**
- * __cdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: skb ip_summed
- */
-static inline uint8_t __cdf_nbuf_get_ip_summed(struct sk_buff *skb)
-{
- return skb->ip_summed;
-}
-
-/**
- * __cdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
- * @skb: Pointer to network buffer
- * @ip_summed: ip checksum
- *
- * Return: none
- */
-static inline void __cdf_nbuf_set_ip_summed(struct sk_buff *skb, uint8_t ip_summed)
-{
- skb->ip_summed = ip_summed;
-}
-
-/**
- * __cdf_nbuf_get_priority() - return the priority value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: skb priority
- */
-static inline uint32_t __cdf_nbuf_get_priority(struct sk_buff *skb)
-{
- return skb->priority;
-}
-
-/**
- * __cdf_nbuf_set_priority() - sets the priority value of the skb
- * @skb: Pointer to network buffer
- * @p: priority
- *
- * Return: none
- */
-static inline void __cdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
-{
- skb->priority = p;
-}
-
-/**
- * __cdf_nbuf_set_next() - sets the next skb pointer of the current skb
- * @skb: Current skb
- * @next_skb: Next skb
- *
- * Return: void
- */
-static inline void
-__cdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
-{
- skb->next = skb_next;
-}
-
-/**
- * __cdf_nbuf_next() - return the next skb pointer of the current skb
- * @skb: Current skb
- *
- * Return: the next skb pointed to by the current skb
- */
-static inline struct sk_buff *__cdf_nbuf_next(struct sk_buff *skb)
-{
- return skb->next;
-}
-
-/**
- * __cdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
- * @skb: Current skb
- * @next_skb: Next skb
- *
- * This fn is used to link up extensions to the head skb. Does not handle
- * linking to the head
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
-{
- skb->next = skb_next;
-}
-
-/**
- * __cdf_nbuf_next_ext() - return the next skb pointer of the current skb
- * @skb: Current skb
- *
- * Return: the next skb pointed to by the current skb
- */
-static inline struct sk_buff *__cdf_nbuf_next_ext(struct sk_buff *skb)
-{
- return skb->next;
-}
-
-/**
- * __cdf_nbuf_append_ext_list() - link list of packet extensions to the head
- * @skb_head: head_buf nbuf holding head segment (single)
- * @ext_list: nbuf list holding linked extensions to the head
- * @ext_len: Total length of all buffers in the extension list
- *
- * This function is used to link up a list of packet extensions (seg1, 2,* ...)
- * to the nbuf holding the head segment (seg0)
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_append_ext_list(struct sk_buff *skb_head,
- struct sk_buff *ext_list, size_t ext_len)
-{
- skb_shinfo(skb_head)->frag_list = ext_list;
- skb_head->data_len = ext_len;
- skb_head->len += skb_head->data_len;
-}
-
-/**
- * __cdf_nbuf_tx_free() - free skb list
- * @skb: Pointer to network buffer
- * @tx_err: TX error
- *
- * Return: none
- */
-static inline void __cdf_nbuf_tx_free(struct sk_buff *bufs, int tx_err)
-{
- while (bufs) {
- struct sk_buff *next = __cdf_nbuf_next(bufs);
- __cdf_nbuf_free(bufs);
- bufs = next;
- }
-}
-
-/**
- * __cdf_nbuf_get_age() - return the checksum value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: checksum value
- */
-static inline uint32_t __cdf_nbuf_get_age(struct sk_buff *skb)
-{
- return skb->csum;
-}
-
-/**
- * __cdf_nbuf_set_age() - sets the checksum value of the skb
- * @skb: Pointer to network buffer
- * @v: Value
- *
- * Return: none
- */
-static inline void __cdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
-{
- skb->csum = v;
-}
-
-/**
- * __cdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
- * @skb: Pointer to network buffer
- * @adj: Adjustment value
- *
- * Return: none
- */
-static inline void __cdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
-{
- skb->csum -= adj;
-}
-
-/**
- * __cdf_nbuf_copy_bits() - return the length of the copy bits for skb
- * @skb: Pointer to network buffer
- * @offset: Offset value
- * @len: Length
- * @to: Destination pointer
- *
- * Return: length of the copy bits for skb
- */
-static inline int32_t
-__cdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
-{
- return skb_copy_bits(skb, offset, to, len);
-}
-
-/**
- * __cdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
- * @skb: Pointer to network buffer
- * @len: Packet length
- *
- * Return: none
- */
-static inline void __cdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
-{
- if (skb->len > len) {
- skb_trim(skb, len);
- } else {
- if (skb_tailroom(skb) < len - skb->len) {
- if (unlikely(pskb_expand_head(skb, 0,
- len - skb->len - skb_tailroom(skb),
- GFP_ATOMIC))) {
- dev_kfree_skb_any(skb);
- qdf_assert(0);
- }
- }
- skb_put(skb, (len - skb->len));
- }
-}
-
-/**
- * __cdf_nbuf_set_protocol() - sets the protocol value of the skb
- * @skb: Pointer to network buffer
- * @protocol: Protocol type
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
-{
- skb->protocol = protocol;
-}
-
-#define __cdf_nbuf_set_tx_htt2_frm(skb, candi) \
- (NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
-#define __cdf_nbuf_get_tx_htt2_frm(skb) \
- NBUF_CB_TX_HL_HTT2_FRM(skb)
-
-#if defined(FEATURE_TSO)
-uint32_t __cdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
- struct qdf_tso_info_t *tso_info);
-
-uint32_t __cdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
-
-static inline bool __cdf_nbuf_is_tso(struct sk_buff *skb)
-{
- return skb_is_gso(skb);
-}
-
-static inline void __cdf_nbuf_inc_users(struct sk_buff *skb)
-{
- atomic_inc(&skb->users);
- return;
-}
-#endif /* TSO */
-
-/**
- * __cdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
- * and get hw_classify by peeking
- * into packet
- * @nbuf: Network buffer (skb on Linux)
- * @pkt_type: Pkt type (from enum htt_pkt_type)
- * @pkt_subtype: Bit 4 of this field in HTT descriptor
- * needs to be set in case of CE classification support
- * Is set by this macro.
- * @hw_classify: This is a flag which is set to indicate
- * CE classification is enabled.
- * Do not set this bit for VLAN packets
- * OR for mcast / bcast frames.
- *
- * This macro parses the payload to figure out relevant Tx meta-data e.g.
- * whether to enable tx_classify bit in CE.
- *
- * Overrides pkt_type only if required for 802.3 frames (original ethernet)
- * If protocol is less than ETH_P_802_3_MIN (0x600), then
- * it is the length and a 802.3 frame else it is Ethernet Type II
- * (RFC 894).
- * Bit 4 in pkt_subtype is the tx_classify bit
- *
- * Return: void
- */
-#define __cdf_nbuf_tx_info_get(skb, pkt_type, \
- pkt_subtype, hw_classify) \
-do { \
- struct ethhdr *eh = (struct ethhdr *)skb->data; \
- uint16_t ether_type = ntohs(eh->h_proto); \
- bool is_mc_bc; \
- \
- is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) || \
- is_multicast_ether_addr((uint8_t *)eh); \
- \
- if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) { \
- hw_classify = 1; \
- pkt_subtype = 0x01 << \
- HTT_TX_CLASSIFY_BIT_S; \
- } \
- \
- if (unlikely(ether_type < ETH_P_802_3_MIN)) \
- pkt_type = htt_pkt_type_ethernet; \
- \
-} while (0)
-
-/**
- * nbuf private buffer routines
- */
-
-/**
- * __cdf_nbuf_peek_header() - return the header's addr & m_len
- * @skb: Pointer to network buffer
- * @addr: Pointer to store header's addr
- * @m_len: network buffer length
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
-{
- *addr = skb->data;
- *len = skb->len;
-}
-
-/**
- * typedef struct __cdf_nbuf_queue_t - network buffer queue
- * @head: Head pointer
- * @tail: Tail pointer
- * @qlen: Queue length
- */
-typedef struct __cdf_nbuf_qhead {
- struct sk_buff *head;
- struct sk_buff *tail;
- unsigned int qlen;
-} __cdf_nbuf_queue_t;
-
-/******************Functions *************/
-
-/**
- * __cdf_nbuf_queue_init() - initiallize the queue head
- * @qhead: Queue head
- *
- * Return: CDF status
- */
-static inline QDF_STATUS __cdf_nbuf_queue_init(__cdf_nbuf_queue_t *qhead)
-{
- memset(qhead, 0, sizeof(struct __cdf_nbuf_qhead));
- return QDF_STATUS_SUCCESS;
-}
-
-/**
- * __cdf_nbuf_queue_add() - add an skb in the tail of the queue
- * @qhead: Queue head
- * @skb: Pointer to network buffer
- *
- * This is a lockless version, driver must acquire locks if it
- * needs to synchronize
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_queue_add(__cdf_nbuf_queue_t *qhead, struct sk_buff *skb)
-{
- skb->next = NULL; /*Nullify the next ptr */
-
- if (!qhead->head)
- qhead->head = skb;
- else
- qhead->tail->next = skb;
-
- qhead->tail = skb;
- qhead->qlen++;
-}
-
-/**
- * __cdf_nbuf_queue_insert_head() - add an skb at the head of the queue
- * @qhead: Queue head
- * @skb: Pointer to network buffer
- *
- * This is a lockless version, driver must acquire locks if it needs to
- * synchronize
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_queue_insert_head(__cdf_nbuf_queue_t *qhead, __cdf_nbuf_t skb)
-{
- if (!qhead->head) {
- /*Empty queue Tail pointer Must be updated */
- qhead->tail = skb;
- }
- skb->next = qhead->head;
- qhead->head = skb;
- qhead->qlen++;
-}
-
-/**
- * __cdf_nbuf_queue_remove() - remove a skb from the head of the queue
- * @qhead: Queue head
- *
- * This is a lockless version. Driver should take care of the locks
- *
- * Return: skb or NULL
- */
-static inline
-struct sk_buff *__cdf_nbuf_queue_remove(__cdf_nbuf_queue_t *qhead)
-{
- __cdf_nbuf_t tmp = NULL;
-
- if (qhead->head) {
- qhead->qlen--;
- tmp = qhead->head;
- if (qhead->head == qhead->tail) {
- qhead->head = NULL;
- qhead->tail = NULL;
- } else {
- qhead->head = tmp->next;
- }
- tmp->next = NULL;
- }
- return tmp;
-}
-
-/**
- * __cdf_nbuf_queue_len() - return the queue length
- * @qhead: Queue head
- *
- * Return: Queue length
- */
-static inline uint32_t __cdf_nbuf_queue_len(__cdf_nbuf_queue_t *qhead)
-{
- return qhead->qlen;
-}
-
-/**
- * __cdf_nbuf_queue_next() - return the next skb from packet chain
- * @skb: Pointer to network buffer
- *
- * This API returns the next skb from packet chain, remember the skb is
- * still in the queue
- *
- * Return: NULL if no packets are there
- */
-static inline struct sk_buff *__cdf_nbuf_queue_next(struct sk_buff *skb)
-{
- return skb->next;
-}
-
-/**
- * __cdf_nbuf_is_queue_empty() - check if the queue is empty or not
- * @qhead: Queue head
- *
- * Return: true if length is 0 else false
- */
-static inline bool __cdf_nbuf_is_queue_empty(__cdf_nbuf_queue_t *qhead)
-{
- return qhead->qlen == 0;
-}
-
-/*
- * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
- * Because the queue head will most likely put in some structure,
- * we don't use pointer type as the definition.
- */
-
-/*
- * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
- * Because the queue head will most likely put in some structure,
- * we don't use pointer type as the definition.
- */
-#endif /*_I_CDF_NET_BUF_H */
diff --git a/core/cds/inc/cds_packet.h b/core/cds/inc/cds_packet.h
index 44ce1ef..f63e903 100644
--- a/core/cds/inc/cds_packet.h
+++ b/core/cds/inc/cds_packet.h
@@ -54,11 +54,11 @@
struct cds_pkt_t;
typedef struct cds_pkt_t cds_pkt_t;
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
-#define CDS_PKT_TRAC_TYPE_EAPOL NBUF_PKT_TRAC_TYPE_EAPOL
-#define CDS_PKT_TRAC_TYPE_DHCP NBUF_PKT_TRAC_TYPE_DHCP
-#define CDS_PKT_TRAC_TYPE_MGMT_ACTION NBUF_PKT_TRAC_TYPE_MGMT_ACTION /* Managment action frame */
+#define CDS_PKT_TRAC_TYPE_EAPOL QDF_NBUF_PKT_TRAC_TYPE_EAPOL
+#define CDS_PKT_TRAC_TYPE_DHCP QDF_NBUF_PKT_TRAC_TYPE_DHCP
+#define CDS_PKT_TRAC_TYPE_MGMT_ACTION QDF_NBUF_PKT_TRAC_TYPE_MGMT_ACTION
#define CDS_PKT_TRAC_DUMP_CMD 9999
diff --git a/core/cds/src/cds_packet.c b/core/cds/src/cds_packet.c
index b0f6fdc..f60bd2d 100644
--- a/core/cds/src/cds_packet.c
+++ b/core/cds/src/cds_packet.c
@@ -43,7 +43,7 @@
#include <qdf_mc_timer.h>
#include <qdf_trace.h>
#include <wlan_hdd_main.h>
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_mem.h"
#define TX_PKT_MIN_HEADROOM (64)
@@ -83,8 +83,8 @@
return QDF_STATUS_E_INVAL;
}
- /* Free up the Adf nbuf */
- cdf_nbuf_free(packet->pkt_buf);
+ /* Free up the qdf nbuf */
+ qdf_nbuf_free(packet->pkt_buf);
packet->pkt_buf = NULL;
@@ -120,7 +120,7 @@
return QDF_STATUS_E_INVAL;
}
/* return the requested information */
- *pPacketSize = cdf_nbuf_len(pPacket->pkt_buf);
+ *pPacketSize = qdf_nbuf_len(pPacket->pkt_buf);
return QDF_STATUS_SUCCESS;
}
@@ -265,7 +265,7 @@
/* Register callback function to NBUF
* Lower layer event also will be reported to here */
- cdf_nbuf_reg_trace_cb(cds_pkt_trace_buf_update);
+ qdf_nbuf_reg_trace_cb(cds_pkt_trace_buf_update);
return;
}
@@ -293,18 +293,18 @@
uint8_t *file_name, uint32_t line_num)
{
QDF_STATUS cdf_ret_status = QDF_STATUS_E_FAILURE;
- cdf_nbuf_t nbuf;
+ qdf_nbuf_t nbuf;
- nbuf =
- cdf_nbuf_alloc_debug(NULL, roundup(size + TX_PKT_MIN_HEADROOM, 4),
- TX_PKT_MIN_HEADROOM, sizeof(uint32_t), false,
+ nbuf = qdf_nbuf_alloc_debug(NULL,
+ roundup(size + TX_PKT_MIN_HEADROOM, 4),
+ TX_PKT_MIN_HEADROOM, sizeof(uint32_t), false,
file_name, line_num);
if (nbuf != NULL) {
- cdf_nbuf_put_tail(nbuf, size);
- cdf_nbuf_set_protocol(nbuf, ETH_P_CONTROL);
+ qdf_nbuf_put_tail(nbuf, size);
+ qdf_nbuf_set_protocol(nbuf, ETH_P_CONTROL);
*ppPacket = nbuf;
- *data = cdf_nbuf_data(nbuf);
+ *data = qdf_nbuf_data(nbuf);
cdf_ret_status = QDF_STATUS_SUCCESS;
}
@@ -318,16 +318,16 @@
QDF_STATUS cds_packet_alloc(uint16_t size, void **data, void **ppPacket)
{
QDF_STATUS cdf_ret_status = QDF_STATUS_E_FAILURE;
- cdf_nbuf_t nbuf;
+ qdf_nbuf_t nbuf;
- nbuf = cdf_nbuf_alloc(NULL, roundup(size + TX_PKT_MIN_HEADROOM, 4),
+ nbuf = qdf_nbuf_alloc(NULL, roundup(size + TX_PKT_MIN_HEADROOM, 4),
TX_PKT_MIN_HEADROOM, sizeof(uint32_t), false);
if (nbuf != NULL) {
- cdf_nbuf_put_tail(nbuf, size);
- cdf_nbuf_set_protocol(nbuf, ETH_P_CONTROL);
+ qdf_nbuf_put_tail(nbuf, size);
+ qdf_nbuf_set_protocol(nbuf, ETH_P_CONTROL);
*ppPacket = nbuf;
- *data = cdf_nbuf_data(nbuf);
+ *data = qdf_nbuf_data(nbuf);
cdf_ret_status = QDF_STATUS_SUCCESS;
}
@@ -341,5 +341,5 @@
---------------------------------------------------------------------------*/
void cds_packet_free(void *pPacket)
{
- cdf_nbuf_free((cdf_nbuf_t) pPacket);
+ qdf_nbuf_free((qdf_nbuf_t) pPacket);
}
diff --git a/core/cds/src/cds_sched.c b/core/cds/src/cds_sched.c
index 0d23648..292c4a6 100644
--- a/core/cds/src/cds_sched.c
+++ b/core/cds/src/cds_sched.c
@@ -731,7 +731,7 @@
{
struct list_head local_list;
struct cds_ol_rx_pkt *pkt, *tmp;
- cdf_nbuf_t buf, next_buf;
+ qdf_nbuf_t buf, next_buf;
INIT_LIST_HEAD(&local_list);
spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
@@ -750,8 +750,8 @@
list_del(&pkt->list);
buf = pkt->Rxpkt;
while (buf) {
- next_buf = cdf_nbuf_queue_next(buf);
- cdf_nbuf_free(buf);
+ next_buf = qdf_nbuf_queue_next(buf);
+ qdf_nbuf_free(buf);
buf = next_buf;
}
cds_free_ol_rx_pkt(pSchedContext, pkt);
diff --git a/core/dp/htt/htt.c b/core/dp/htt/htt.c
index ea8a933..58a7dcc 100644
--- a/core/dp/htt/htt.c
+++ b/core/dp/htt/htt.c
@@ -121,14 +121,14 @@
void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt_union *pkt, *next;
- cdf_nbuf_t netbuf;
+ qdf_nbuf_t netbuf;
pkt = pdev->htt_htc_pkt_misclist;
while (pkt) {
next = pkt->u.next;
- netbuf = (cdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
- cdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
- cdf_nbuf_free(netbuf);
+ netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
+ qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_free(netbuf);
qdf_mem_free(pkt);
pkt = next;
}
diff --git a/core/dp/htt/htt_fw_stats.c b/core/dp/htt/htt_fw_stats.c
index eb86320..0bf4c13 100644
--- a/core/dp/htt/htt_fw_stats.c
+++ b/core/dp/htt/htt_fw_stats.c
@@ -32,7 +32,7 @@
#include <htc_api.h> /* HTC_PACKET */
#include <htt.h> /* HTT_T2H_MSG_TYPE, etc. */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_mem.h> /* qdf_mem_set */
#include <ol_fw_tx_dbg.h> /* ol_fw_tx_dbg_ppdu_base */
diff --git a/core/dp/htt/htt_h2t.c b/core/dp/htt/htt_h2t.c
index bb8e714..39762b5 100644
--- a/core/dp/htt/htt_h2t.c
+++ b/core/dp/htt/htt_h2t.c
@@ -43,7 +43,7 @@
*/
#include <qdf_mem.h> /* qdf_mem_copy */
-#include <cdf_nbuf.h> /* cdf_nbuf_map_single */
+#include <qdf_nbuf.h> /* qdf_nbuf_map_single */
#include <htc_api.h> /* HTC_PACKET */
#include <htc.h> /* HTC_HDR_ALIGNMENT_PADDING */
#include <htt.h> /* HTT host->target msg defs */
@@ -62,25 +62,25 @@
static void
htt_h2t_send_complete_free_netbuf(void *pdev, A_STATUS status,
- cdf_nbuf_t netbuf, uint16_t msdu_id)
+ qdf_nbuf_t netbuf, uint16_t msdu_id)
{
- cdf_nbuf_free(netbuf);
+ qdf_nbuf_free(netbuf);
}
void htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
void (*send_complete_part2)(void *pdev, A_STATUS status,
- cdf_nbuf_t msdu, uint16_t msdu_id);
+ qdf_nbuf_t msdu, uint16_t msdu_id);
struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
struct htt_htc_pkt *htt_pkt;
- cdf_nbuf_t netbuf;
+ qdf_nbuf_t netbuf;
send_complete_part2 = htc_pkt->pPktContext;
htt_pkt = container_of(htc_pkt, struct htt_htc_pkt, htc_pkt);
/* process (free or keep) the netbuf that held the message */
- netbuf = (cdf_nbuf_t) htc_pkt->pNetBufContext;
+ netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
if (send_complete_part2 != NULL) {
send_complete_part2(htt_pkt->pdev_ctxt, htc_pkt->Status, netbuf,
htt_pkt->msdu_id);
@@ -101,7 +101,7 @@
A_STATUS rc = A_OK;
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
u_int32_t *msg_word;
struct htt_tx_frag_desc_bank_cfg_t *bank_cfg;
@@ -115,7 +115,7 @@
pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
- msg = cdf_nbuf_alloc(
+ msg = qdf_nbuf_alloc(
pdev->osdev,
HTT_MSG_BUF_SIZE(sizeof(struct htt_tx_frag_desc_bank_cfg_t)),
/* reserve room for the HTC header */
@@ -131,14 +131,14 @@
* separately during the below call to adf_nbuf_push_head.
* The contribution from the HTC header is added separately inside HTC.
*/
- cdf_nbuf_put_tail(msg, sizeof(struct htt_tx_frag_desc_bank_cfg_t));
+ qdf_nbuf_put_tail(msg, sizeof(struct htt_tx_frag_desc_bank_cfg_t));
/* fill in the message contents */
- msg_word = (u_int32_t *) cdf_nbuf_data(msg);
+ msg_word = (u_int32_t *) qdf_nbuf_data(msg);
memset(msg_word, 0 , sizeof(struct htt_tx_frag_desc_bank_cfg_t));
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG);
@@ -170,8 +170,8 @@
SET_HTC_PACKET_INFO_TX(
&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
1); /* tag - not relevant here */
@@ -187,7 +187,7 @@
A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
pkt = htt_htc_pkt_alloc(pdev);
@@ -201,7 +201,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for the HTC header */
- msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
+ msg = qdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
true);
if (!msg) {
@@ -212,23 +212,23 @@
/*
* Set the length of the message.
* The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
- * separately during the below call to cdf_nbuf_push_head.
+ * separately during the below call to qdf_nbuf_push_head.
* The contribution from the HTC header is added separately inside HTC.
*/
- cdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES);
+ qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES);
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg), cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg), qdf_nbuf_len(msg),
pdev->htc_endpoint,
1); /* tag - not relevant here */
@@ -247,7 +247,7 @@
A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
int enable_ctrl_data, enable_mgmt_data,
enable_null_data, enable_phy_data, enable_hdr,
@@ -264,7 +264,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for the HTC header */
- msg = cdf_nbuf_alloc(pdev->osdev,
+ msg = qdf_nbuf_alloc(pdev->osdev,
HTT_MSG_BUF_SIZE(HTT_RX_RING_CFG_BYTES(1)),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
true);
@@ -275,16 +275,16 @@
/*
* Set the length of the message.
* The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
- * separately during the below call to cdf_nbuf_push_head.
+ * separately during the below call to qdf_nbuf_push_head.
* The contribution from the HTC header is added separately inside HTC.
*/
- cdf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1));
+ qdf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1));
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_CFG);
@@ -414,8 +414,8 @@
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
HTC_TX_PACKET_TAG_RUNTIME_PUT);
@@ -437,7 +437,7 @@
uint8_t cfg_stat_type, uint32_t cfg_val, uint64_t cookie)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
uint16_t htc_tag = 1;
@@ -463,7 +463,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
- msg = cdf_nbuf_alloc(pdev->osdev,
+ msg = qdf_nbuf_alloc(pdev->osdev,
HTT_MSG_BUF_SIZE(HTT_H2T_STATS_REQ_MSG_SZ),
/* reserve room for HTC header */
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
@@ -473,13 +473,13 @@
return -EINVAL; /* failure */
}
/* set the length of the message */
- cdf_nbuf_put_tail(msg, HTT_H2T_STATS_REQ_MSG_SZ);
+ qdf_nbuf_put_tail(msg, HTT_H2T_STATS_REQ_MSG_SZ);
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_STATS_REQ);
@@ -504,8 +504,8 @@
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
htc_tag); /* tag - not relevant here */
@@ -524,7 +524,7 @@
A_STATUS htt_h2t_sync_msg(struct htt_pdev_t *pdev, uint8_t sync_cnt)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
pkt = htt_htc_pkt_alloc(pdev);
@@ -538,7 +538,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for HTC header */
- msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_H2T_SYNC_MSG_SZ),
+ msg = qdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_H2T_SYNC_MSG_SZ),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
false);
if (!msg) {
@@ -546,13 +546,13 @@
return A_NO_MEMORY;
}
/* set the length of the message */
- cdf_nbuf_put_tail(msg, HTT_H2T_SYNC_MSG_SZ);
+ qdf_nbuf_put_tail(msg, HTT_H2T_SYNC_MSG_SZ);
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SYNC);
@@ -560,8 +560,8 @@
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
HTC_TX_PACKET_TAG_RUNTIME_PUT);
@@ -582,7 +582,7 @@
int max_subfrms_ampdu, int max_subfrms_amsdu)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
pkt = htt_htc_pkt_alloc(pdev);
@@ -596,7 +596,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for HTC header */
- msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_AGGR_CFG_MSG_SZ),
+ msg = qdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_AGGR_CFG_MSG_SZ),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
false);
if (!msg) {
@@ -604,13 +604,13 @@
return -EINVAL; /* failure */
}
/* set the length of the message */
- cdf_nbuf_put_tail(msg, HTT_AGGR_CFG_MSG_SZ);
+ qdf_nbuf_put_tail(msg, HTT_AGGR_CFG_MSG_SZ);
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_AGGR_CFG);
@@ -627,8 +627,8 @@
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
HTC_TX_PACKET_TAG_RUNTIME_PUT);
@@ -657,7 +657,7 @@
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
pkt = htt_htc_pkt_alloc(pdev);
@@ -671,7 +671,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for HTC header */
- msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
+ msg = qdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
false);
if (!msg) {
@@ -679,13 +679,13 @@
return A_NO_MEMORY;
}
/* set the length of the message */
- cdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
+ qdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
@@ -734,8 +734,8 @@
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
HTC_TX_PACKET_TAG_RUNTIME_PUT);
@@ -749,7 +749,7 @@
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
pkt = htt_htc_pkt_alloc(pdev);
@@ -763,7 +763,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for HTC header */
- msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
+ msg = qdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
false);
if (!msg) {
@@ -771,13 +771,13 @@
return -A_NO_MEMORY;
}
/* set the length of the message */
- cdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
+ qdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
@@ -878,8 +878,8 @@
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
1); /* tag - not relevant here */
@@ -904,7 +904,7 @@
bool uc_active, bool is_tx)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
uint8_t active_target = 0;
@@ -919,7 +919,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for HTC header */
- msg = cdf_nbuf_alloc(pdev->osdev,
+ msg = qdf_nbuf_alloc(pdev->osdev,
HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
false);
@@ -928,13 +928,13 @@
return -A_NO_MEMORY;
}
/* set the length of the message */
- cdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
+ qdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
if (uc_active && is_tx)
@@ -951,8 +951,8 @@
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
1); /* tag - not relevant here */
@@ -973,7 +973,7 @@
int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt *pkt;
- cdf_nbuf_t msg;
+ qdf_nbuf_t msg;
uint32_t *msg_word;
pkt = htt_htc_pkt_alloc(pdev);
@@ -987,7 +987,7 @@
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for HTC header */
- msg = cdf_nbuf_alloc(pdev->osdev,
+ msg = qdf_nbuf_alloc(pdev->osdev,
HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
false);
@@ -996,13 +996,13 @@
return -A_NO_MEMORY;
}
/* set the length of the message */
- cdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
+ qdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
/* fill in the message contents */
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
- cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+ qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word,
@@ -1011,8 +1011,8 @@
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
- cdf_nbuf_data(msg),
- cdf_nbuf_len(msg),
+ qdf_nbuf_data(msg),
+ qdf_nbuf_len(msg),
pdev->htc_endpoint,
1); /* tag - not relevant here */
diff --git a/core/dp/htt/htt_internal.h b/core/dp/htt/htt_internal.h
index 8b1cb4c..e462b13 100644
--- a/core/dp/htt/htt_internal.h
+++ b/core/dp/htt/htt_internal.h
@@ -29,7 +29,7 @@
#define _HTT_INTERNAL__H_
#include <athdefs.h> /* A_STATUS */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_util.h> /* qdf_assert */
#include <htc_api.h> /* HTC_PACKET */
@@ -133,10 +133,10 @@
bool in_use;
};
#endif
-static inline struct htt_host_rx_desc_base *htt_rx_desc(cdf_nbuf_t msdu)
+static inline struct htt_host_rx_desc_base *htt_rx_desc(qdf_nbuf_t msdu)
{
return (struct htt_host_rx_desc_base *)
- (((size_t) (cdf_nbuf_head(msdu) + HTT_RX_DESC_ALIGN_MASK)) &
+ (((size_t) (qdf_nbuf_head(msdu) + HTT_RX_DESC_ALIGN_MASK)) &
~HTT_RX_DESC_ALIGN_MASK);
}
@@ -191,7 +191,7 @@
*
* Return: none
*/
-static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
+static inline void htt_rx_extract_lro_info(qdf_nbuf_t msdu,
struct htt_host_rx_desc_base *rx_desc)
{
NBUF_CB_RX_LRO_ELIGIBLE(msdu) = rx_desc->msdu_end.lro_eligible;
@@ -212,7 +212,7 @@
#else
static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
{}
-static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
+static inline void htt_rx_extract_lro_info(qdf_nbuf_t msdu,
struct htt_host_rx_desc_base *rx_desc) {}
#endif /* FEATURE_LRO */
@@ -388,19 +388,19 @@
#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu) do { \
HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
- _msdu = cdf_nbuf_queue_remove(&_pdev->txnbufq);\
+ _msdu = qdf_nbuf_queue_remove(&_pdev->txnbufq);\
HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
} while (0)
#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu) do { \
HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
- cdf_nbuf_queue_add(&_pdev->txnbufq, _msdu); \
+ qdf_nbuf_queue_add(&_pdev->txnbufq, _msdu); \
HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
} while (0)
#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu) do { \
HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
- cdf_nbuf_queue_insert_head(&_pdev->txnbufq, _msdu);\
+ qdf_nbuf_queue_insert_head(&_pdev->txnbufq, _msdu);\
HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
} while (0)
#else
@@ -463,9 +463,9 @@
int
htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
- cdf_nbuf_t netbuf);
+ qdf_nbuf_t netbuf);
-cdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr);
+qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr);
#ifdef IPA_OFFLOAD
int
@@ -551,7 +551,7 @@
static inline
void htt_rx_dbg_rxbuf_set(struct htt_pdev_t *pdev,
uint32_t paddr,
- cdf_nbuf_t rx_netbuf)
+ qdf_nbuf_t rx_netbuf)
{
if (pdev->rx_buff_list) {
pdev->rx_buff_list[pdev->rx_buff_index].paddr =
@@ -574,7 +574,7 @@
*/
static inline
void htt_rx_dbg_rxbuf_reset(struct htt_pdev_t *pdev,
- cdf_nbuf_t netbuf)
+ qdf_nbuf_t netbuf)
{
uint32_t index;
@@ -609,13 +609,13 @@
static inline
void htt_rx_dbg_rxbuf_set(struct htt_pdev_t *pdev,
uint32_t paddr,
- cdf_nbuf_t rx_netbuf)
+ qdf_nbuf_t rx_netbuf)
{
return;
}
static inline
void htt_rx_dbg_rxbuf_reset(struct htt_pdev_t *pdev,
- cdf_nbuf_t netbuf)
+ qdf_nbuf_t netbuf)
{
return;
}
diff --git a/core/dp/htt/htt_rx.c b/core/dp/htt/htt_rx.c
index eb532ce..72b7047 100644
--- a/core/dp/htt/htt_rx.c
+++ b/core/dp/htt/htt_rx.c
@@ -41,7 +41,7 @@
#include <qdf_mem.h> /* qdf_mem_malloc,free, etc. */
#include <qdf_types.h> /* qdf_print, bool */
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <qdf_timer.h> /* qdf_timer_free */
#include <htt.h> /* HTT_HL_RX_DESC_SIZE */
@@ -128,13 +128,13 @@
listnode_offset);
if (hash_entry->netbuf) {
#ifdef DEBUG_DMA_DONE
- cdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
+ qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
QDF_DMA_BIDIRECTIONAL);
#else
- cdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
+ qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
QDF_DMA_FROM_DEVICE);
#endif
- cdf_nbuf_free(hash_entry->netbuf);
+ qdf_nbuf_free(hash_entry->netbuf);
hash_entry->paddr = 0;
}
list_iter = list_iter->next;
@@ -230,11 +230,11 @@
idx = *(pdev->rx_ring.alloc_idx.vaddr);
while (num > 0) {
qdf_dma_addr_t paddr;
- cdf_nbuf_t rx_netbuf;
+ qdf_nbuf_t rx_netbuf;
int headroom;
rx_netbuf =
- cdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
+ qdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
0, 4, false);
if (!rx_netbuf) {
qdf_timer_stop(&pdev->rx_ring.
@@ -269,26 +269,26 @@
smp_mb();
#endif
/*
- * Adjust cdf_nbuf_data to point to the location in the buffer
+ * Adjust qdf_nbuf_data to point to the location in the buffer
* where the rx descriptor will be filled in.
*/
- headroom = cdf_nbuf_data(rx_netbuf) - (uint8_t *) rx_desc;
- cdf_nbuf_push_head(rx_netbuf, headroom);
+ headroom = qdf_nbuf_data(rx_netbuf) - (uint8_t *) rx_desc;
+ qdf_nbuf_push_head(rx_netbuf, headroom);
#ifdef DEBUG_DMA_DONE
status =
- cdf_nbuf_map(pdev->osdev, rx_netbuf,
+ qdf_nbuf_map(pdev->osdev, rx_netbuf,
QDF_DMA_BIDIRECTIONAL);
#else
status =
- cdf_nbuf_map(pdev->osdev, rx_netbuf,
+ qdf_nbuf_map(pdev->osdev, rx_netbuf,
QDF_DMA_FROM_DEVICE);
#endif
if (status != QDF_STATUS_SUCCESS) {
- cdf_nbuf_free(rx_netbuf);
+ qdf_nbuf_free(rx_netbuf);
goto fail;
}
- paddr = cdf_nbuf_get_frag_paddr(rx_netbuf, 0);
+ paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
if (pdev->cfg.is_full_reorder_offload) {
if (qdf_unlikely
(htt_rx_hash_list_insert(pdev, paddr,
@@ -296,13 +296,13 @@
qdf_print("%s: hash insert failed!\n",
__func__);
#ifdef DEBUG_DMA_DONE
- cdf_nbuf_unmap(pdev->osdev, rx_netbuf,
+ qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
QDF_DMA_BIDIRECTIONAL);
#else
- cdf_nbuf_unmap(pdev->osdev, rx_netbuf,
+ qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
QDF_DMA_FROM_DEVICE);
#endif
- cdf_nbuf_free(rx_netbuf);
+ qdf_nbuf_free(rx_netbuf);
goto fail;
}
htt_rx_dbg_rxbuf_set(pdev, paddr, rx_netbuf);
@@ -360,17 +360,17 @@
while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
#ifdef DEBUG_DMA_DONE
- cdf_nbuf_unmap(pdev->osdev,
+ qdf_nbuf_unmap(pdev->osdev,
pdev->rx_ring.buf.
netbufs_ring[sw_rd_idx],
QDF_DMA_BIDIRECTIONAL);
#else
- cdf_nbuf_unmap(pdev->osdev,
+ qdf_nbuf_unmap(pdev->osdev,
pdev->rx_ring.buf.
netbufs_ring[sw_rd_idx],
QDF_DMA_FROM_DEVICE);
#endif
- cdf_nbuf_free(pdev->rx_ring.buf.
+ qdf_nbuf_free(pdev->rx_ring.buf.
netbufs_ring[sw_rd_idx]);
sw_rd_idx++;
sw_rd_idx &= pdev->rx_ring.size_mask;
@@ -623,10 +623,10 @@
*inspect = rx_msdu_fw_desc & FW_RX_DESC_INSPECT_M;
}
-static inline cdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
+static inline qdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
{
int idx;
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
@@ -644,7 +644,7 @@
return msdu;
}
-static inline cdf_nbuf_t
+static inline qdf_nbuf_t
htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, uint32_t paddr)
{
HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
@@ -657,7 +657,7 @@
#ifdef CHECKSUM_OFFLOAD
static inline
void
-htt_set_checksum_result_ll(htt_pdev_handle pdev, cdf_nbuf_t msdu,
+htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
struct htt_host_rx_desc_base *rx_desc)
{
#define MAX_IP_VER 2
@@ -704,7 +704,7 @@
QDF_NBUF_RX_CKSUM_NONE :
QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
}
- cdf_nbuf_set_rx_cksum(msdu, &cksum);
+ qdf_nbuf_set_rx_cksum(msdu, &cksum);
#undef MAX_IP_VER
#undef MAX_PROTO_VAL
}
@@ -713,13 +713,13 @@
#endif
#ifdef DEBUG_DMA_DONE
-void htt_rx_print_rx_indication(cdf_nbuf_t rx_ind_msg, htt_pdev_handle pdev)
+void htt_rx_print_rx_indication(qdf_nbuf_t rx_ind_msg, htt_pdev_handle pdev)
{
uint32_t *msg_word;
int byte_offset;
int mpdu_range, num_mpdu_range;
- msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
qdf_print
("------------------HTT RX IND-----------------------------\n");
@@ -795,11 +795,11 @@
int
htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
- cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu)
+ qdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu)
{
int msdu_len, msdu_chaining = 0;
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
struct htt_host_rx_desc_base *rx_desc;
uint8_t *rx_ind_data;
uint32_t *msg_word, num_msdu_bytes;
@@ -807,7 +807,7 @@
uint8_t pad_bytes = 0;
HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
- rx_ind_data = cdf_nbuf_data(rx_ind_msg);
+ rx_ind_data = qdf_nbuf_data(rx_ind_msg);
msg_word = (uint32_t *) rx_ind_data;
msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
@@ -830,14 +830,14 @@
* Set the netbuf length to be the entire buffer length
* initially, so the unmap will unmap the entire buffer.
*/
- cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
+ qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
- cdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
+ qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
#else
- cdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
+ qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
#endif
- /* cache consistency has been taken care of by cdf_nbuf_unmap */
+ /* cache consistency has been taken care of by qdf_nbuf_unmap */
/*
* Now read the rx descriptor.
@@ -854,7 +854,7 @@
* than the descriptor.
*/
- cdf_nbuf_pull_head(msdu,
+ qdf_nbuf_pull_head(msdu,
HTT_RX_STD_DESC_RESERVATION + pad_bytes);
/*
@@ -879,7 +879,7 @@
RX_ATTENTION_0_MSDU_DONE_MASK))) {
qdf_mdelay(1);
- cdf_invalidate_range((void *)rx_desc,
+ qdf_invalidate_range((void *)rx_desc,
(void *)((char *)rx_desc +
HTT_RX_STD_DESC_RESERVATION));
@@ -895,7 +895,7 @@
#ifdef HTT_RX_RESTORE
qdf_print("RX done bit error detected!\n");
- cdf_nbuf_set_next(msdu, NULL);
+ qdf_nbuf_set_next(msdu, NULL);
*tail_msdu = msdu;
pdev->rx_ring.rx_reset = 1;
return msdu_chaining;
@@ -997,7 +997,7 @@
if (msdu_len > 0x3000)
break;
#endif
- cdf_nbuf_trim_tail(msdu,
+ qdf_nbuf_trim_tail(msdu,
HTT_RX_BUF_SIZE -
(RX_STD_DESC_SIZE +
msdu_len));
@@ -1005,10 +1005,10 @@
} while (0);
while (msdu_chained--) {
- cdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
- cdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
+ qdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
+ qdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
msdu_len -= HTT_RX_BUF_SIZE;
- cdf_nbuf_set_next(msdu, next);
+ qdf_nbuf_set_next(msdu, next);
msdu = next;
msdu_chaining = 1;
@@ -1025,7 +1025,7 @@
RX_STD_DESC_SIZE);
}
- cdf_nbuf_trim_tail(next,
+ qdf_nbuf_trim_tail(next,
HTT_RX_BUF_SIZE -
(RX_STD_DESC_SIZE +
msdu_len));
@@ -1038,11 +1038,11 @@
RX_MSDU_END_4_LAST_MSDU_LSB;
if (last_msdu) {
- cdf_nbuf_set_next(msdu, NULL);
+ qdf_nbuf_set_next(msdu, NULL);
break;
} else {
- cdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
- cdf_nbuf_set_next(msdu, next);
+ qdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
+ qdf_nbuf_set_next(msdu, next);
msdu = next;
}
}
@@ -1065,26 +1065,26 @@
int
htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
- cdf_nbuf_t offload_deliver_msg,
+ qdf_nbuf_t offload_deliver_msg,
int *vdev_id,
int *peer_id,
int *tid,
uint8_t *fw_desc,
- cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf)
+ qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
{
- cdf_nbuf_t buf;
+ qdf_nbuf_t buf;
uint32_t *msdu_hdr, msdu_len;
*head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
/* Fake read mpdu_desc to keep desc ptr in sync */
htt_rx_mpdu_desc_list_next(pdev, NULL);
- cdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
+ qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
- cdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
+ qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
#else
- cdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
+ qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
#endif
- msdu_hdr = (uint32_t *) cdf_nbuf_data(buf);
+ msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);
/* First dword */
msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
@@ -1096,8 +1096,8 @@
*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
- cdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
- cdf_nbuf_set_pktlen(buf, msdu_len);
+ qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
+ qdf_nbuf_set_pktlen(buf, msdu_len);
return 0;
}
@@ -1109,9 +1109,9 @@
int *peer_id,
int *tid,
uint8_t *fw_desc,
- cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf)
+ qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
{
- cdf_nbuf_t buf;
+ qdf_nbuf_t buf;
uint32_t *msdu_hdr, msdu_len;
uint32_t *curr_msdu;
uint32_t paddr;
@@ -1125,13 +1125,13 @@
qdf_print("%s: netbuf pop failed!\n", __func__);
return 0;
}
- cdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
+ qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
- cdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
+ qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
#else
- cdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
+ qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
#endif
- msdu_hdr = (uint32_t *) cdf_nbuf_data(buf);
+ msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);
/* First dword */
msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
@@ -1143,13 +1143,13 @@
*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
- cdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
- cdf_nbuf_set_pktlen(buf, msdu_len);
+ qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
+ qdf_nbuf_set_pktlen(buf, msdu_len);
return 0;
}
extern void
-dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);
+dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);
#ifdef RX_HASH_DEBUG
#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) HTT_ASSERT_ALWAYS(msdu_count)
@@ -1160,10 +1160,10 @@
/* Return values: 1 - success, 0 - failure */
int
htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
- cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu)
+ qdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu)
{
- cdf_nbuf_t msdu, next, prev = NULL;
+ qdf_nbuf_t msdu, next, prev = NULL;
uint8_t *rx_ind_data;
uint32_t *msg_word;
unsigned int msdu_count = 0;
@@ -1172,7 +1172,7 @@
HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
- rx_ind_data = cdf_nbuf_data(rx_ind_msg);
+ rx_ind_data = qdf_nbuf_data(rx_ind_msg);
msg_word = (uint32_t *) rx_ind_data;
offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
@@ -1206,14 +1206,14 @@
* Set the netbuf length to be the entire buffer length
* initially, so the unmap will unmap the entire buffer.
*/
- cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
+ qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
- cdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
+ qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
#else
- cdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
+ qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
#endif
- /* cache consistency has been taken care of by cdf_nbuf_unmap */
+ /* cache consistency has been taken care of by qdf_nbuf_unmap */
rx_desc = htt_rx_desc(msdu);
htt_rx_extract_lro_info(msdu, rx_desc);
@@ -1222,14 +1222,14 @@
* Make the netbuf's data pointer point to the payload rather
* than the descriptor.
*/
- cdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
+ qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
#if HTT_PADDR64
#define NEXT_FIELD_OFFSET_IN32 2
#else /* ! HTT_PADDR64 */
#define NEXT_FIELD_OFFSET_IN32 1
#endif /* HTT_PADDR64 */
#
- cdf_nbuf_trim_tail(msdu,
+ qdf_nbuf_trim_tail(msdu,
HTT_RX_BUF_SIZE -
(RX_STD_DESC_SIZE +
HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
@@ -1263,7 +1263,7 @@
return 0;
} else {
*tail_msdu = prev;
- cdf_nbuf_set_next(prev, NULL);
+ qdf_nbuf_set_next(prev, NULL);
return 1;
}
} else { /* if this is not the last msdu */
@@ -1284,7 +1284,7 @@
* next pointer of the preceding msdu
*/
if (prev) {
- cdf_nbuf_set_next(prev, next);
+ qdf_nbuf_set_next(prev, next);
} else {
/* if this is the first msdu, update the
* head pointer
@@ -1311,22 +1311,22 @@
*tail_msdu = NULL;
return 0;
}
- cdf_nbuf_set_next(msdu, next);
+ qdf_nbuf_set_next(msdu, next);
prev = msdu;
msdu = next;
} else {
*tail_msdu = msdu;
- cdf_nbuf_set_next(msdu, NULL);
+ qdf_nbuf_set_next(msdu, NULL);
}
}
return 1;
}
-/* Util fake function that has same prototype as cdf_nbuf_clone that just
+/* Util fake function that has same prototype as qdf_nbuf_clone that just
* retures the same nbuf
*/
-cdf_nbuf_t htt_rx_cdf_noclone_buf(cdf_nbuf_t buf)
+qdf_nbuf_t htt_rx_cdf_noclone_buf(qdf_nbuf_t buf)
{
return buf;
}
@@ -1393,15 +1393,15 @@
/* This function is used by montior mode code to restitch an MSDU list
* corresponding to an MPDU back into an MPDU by linking up the skbs.
*/
-cdf_nbuf_t
+qdf_nbuf_t
htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
- cdf_nbuf_t head_msdu,
+ qdf_nbuf_t head_msdu,
struct ieee80211_rx_status *rx_status,
unsigned clone_not_reqd)
{
- cdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list_cloned;
- cdf_nbuf_t (*clone_nbuf_fn)(cdf_nbuf_t buf);
+ qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list_cloned;
+ qdf_nbuf_t (*clone_nbuf_fn)(qdf_nbuf_t buf);
unsigned decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len,
mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
is_amsdu, is_first_frag, amsdu_pad, msdu_len;
@@ -1415,7 +1415,7 @@
* waste cycles cloning the packets
*/
clone_nbuf_fn =
- clone_not_reqd ? htt_rx_cdf_noclone_buf : cdf_nbuf_clone;
+ clone_not_reqd ? htt_rx_cdf_noclone_buf : qdf_nbuf_clone;
/* The nbuf has been pulled just beyond the status and points to the
* payload
@@ -1452,13 +1452,13 @@
frag_list_sum_len = 0;
is_first_frag = 1;
- msdu_len = cdf_nbuf_len(mpdu_buf);
+ msdu_len = qdf_nbuf_len(mpdu_buf);
/* Drop the zero-length msdu */
if (!msdu_len)
goto mpdu_stitch_fail;
- msdu_orig = cdf_nbuf_next(head_msdu);
+ msdu_orig = qdf_nbuf_next(head_msdu);
while (msdu_orig) {
@@ -1472,7 +1472,7 @@
head_frag_list_cloned = msdu;
}
- msdu_len = cdf_nbuf_len(msdu);
+ msdu_len = qdf_nbuf_len(msdu);
/* Drop the zero-length msdu */
if (!msdu_len)
goto mpdu_stitch_fail;
@@ -1480,22 +1480,22 @@
frag_list_sum_len += msdu_len;
/* Maintain the linking of the cloned MSDUS */
- cdf_nbuf_set_next_ext(prev_buf, msdu);
+ qdf_nbuf_set_next_ext(prev_buf, msdu);
/* Move to the next */
prev_buf = msdu;
- msdu_orig = cdf_nbuf_next(msdu_orig);
+ msdu_orig = qdf_nbuf_next(msdu_orig);
}
/* The last msdu length need be larger than HTT_FCS_LEN */
if (msdu_len < HTT_FCS_LEN)
goto mpdu_stitch_fail;
- cdf_nbuf_trim_tail(prev_buf, HTT_FCS_LEN);
+ qdf_nbuf_trim_tail(prev_buf, HTT_FCS_LEN);
/* If there were more fragments to this RAW frame */
if (head_frag_list_cloned) {
- cdf_nbuf_append_ext_list(mpdu_buf,
+ qdf_nbuf_append_ext_list(mpdu_buf,
head_frag_list_cloned,
frag_list_sum_len);
}
@@ -1544,7 +1544,7 @@
* accomodating any radio-tap /prism like PHY header
*/
#define HTT_MAX_MONITOR_HEADER (512)
- mpdu_buf = cdf_nbuf_alloc(pdev->osdev,
+ mpdu_buf = qdf_nbuf_alloc(pdev->osdev,
HTT_MAX_MONITOR_HEADER + mpdu_buf_len,
HTT_MAX_MONITOR_HEADER, 4, false);
@@ -1556,7 +1556,7 @@
*/
prev_buf = mpdu_buf;
- dest = cdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
+ dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
if (!dest)
goto mpdu_stitch_fail;
qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
@@ -1591,7 +1591,7 @@
} else {
/* Maintain the linking of the cloned MSDUS */
- cdf_nbuf_set_next_ext(prev_buf, msdu);
+ qdf_nbuf_set_next_ext(prev_buf, msdu);
/* Reload the hdr ptr only on non-first MSDUs */
rx_desc = htt_rx_desc(msdu_orig);
@@ -1600,18 +1600,18 @@
}
/* Copy this buffers MSDU related status into the prev buffer */
- dest = cdf_nbuf_put_tail(prev_buf, msdu_llc_len + amsdu_pad);
+ dest = qdf_nbuf_put_tail(prev_buf, msdu_llc_len + amsdu_pad);
dest += amsdu_pad;
qdf_mem_copy(dest, hdr_desc, msdu_llc_len);
/* Push the MSDU buffer beyond the decap header */
- cdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
+ qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
frag_list_sum_len +=
- msdu_llc_len + cdf_nbuf_len(msdu) + amsdu_pad;
+ msdu_llc_len + qdf_nbuf_len(msdu) + amsdu_pad;
/* Set up intra-AMSDU pad to be added to start of next buffer -
* AMSDU pad is 4 byte pad on AMSDU subframe */
- amsdu_pad = (msdu_llc_len + cdf_nbuf_len(msdu)) & 0x3;
+ amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
/* TODO FIXME How do we handle MSDUs that have fraglist - Should
@@ -1621,12 +1621,12 @@
/* Move to the next */
prev_buf = msdu;
- msdu_orig = cdf_nbuf_next(msdu_orig);
+ msdu_orig = qdf_nbuf_next(msdu_orig);
}
/* TODO: Convert this to suitable cdf routines */
- cdf_nbuf_append_ext_list(mpdu_buf, head_frag_list_cloned,
+ qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list_cloned,
frag_list_sum_len);
mpdu_stitch_done:
@@ -1644,8 +1644,8 @@
msdu = head_msdu;
while (msdu) {
msdu_orig = msdu;
- msdu = cdf_nbuf_next(msdu);
- cdf_nbuf_set_next(msdu_orig, NULL);
+ msdu = qdf_nbuf_next(msdu);
+ qdf_nbuf_set_next(msdu_orig, NULL);
}
}
@@ -1656,27 +1656,27 @@
if (!clone_not_reqd) {
/* Free the head buffer */
if (mpdu_buf)
- cdf_nbuf_free(mpdu_buf);
+ qdf_nbuf_free(mpdu_buf);
/* Free the partial list */
while (head_frag_list_cloned) {
msdu = head_frag_list_cloned;
head_frag_list_cloned =
- cdf_nbuf_next_ext(head_frag_list_cloned);
- cdf_nbuf_free(msdu);
+ qdf_nbuf_next_ext(head_frag_list_cloned);
+ qdf_nbuf_free(msdu);
}
} else {
/* Free the alloced head buffer */
if (decap_format != HW_RX_DECAP_FORMAT_RAW)
if (mpdu_buf)
- cdf_nbuf_free(mpdu_buf);
+ qdf_nbuf_free(mpdu_buf);
/* Free the orig buffers */
msdu = head_msdu;
while (msdu) {
msdu_orig = msdu;
- msdu = cdf_nbuf_next(msdu);
- cdf_nbuf_free(msdu_orig);
+ msdu = qdf_nbuf_next(msdu);
+ qdf_nbuf_free(msdu_orig);
}
}
@@ -1698,8 +1698,8 @@
* to either htt_rx_amsdu_pop_ll or htt_rx_amsdu_rx_in_order_pop_ll.
*/
int (*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
- cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+ qdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu);
/*
* htt_rx_frag_pop -
@@ -1707,20 +1707,20 @@
* to either htt_rx_amsdu_pop_ll
*/
int (*htt_rx_frag_pop)(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
- cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+ qdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu);
int
(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
- cdf_nbuf_t offload_deliver_msg,
+ qdf_nbuf_t offload_deliver_msg,
int *vdev_id,
int *peer_id,
int *tid,
uint8_t *fw_desc,
- cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+ qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf);
void * (*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg);
+ qdf_nbuf_t rx_ind_msg);
bool (*htt_rx_mpdu_desc_retry)(
htt_pdev_handle pdev, void *mpdu_desc);
@@ -1744,17 +1744,17 @@
int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
-void * (*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, cdf_nbuf_t msdu);
+void * (*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, qdf_nbuf_t msdu);
bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
bool (*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
void *mpdu_desc, uint8_t *key_id);
-void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
{
int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
- cdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
+ qdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
return (void *)htt_rx_desc(netbuf);
}
@@ -1773,12 +1773,12 @@
uint8_t *phy_mode);
void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
- cdf_nbuf_t netbuf)
+ qdf_nbuf_t netbuf)
{
return (void *)htt_rx_desc(netbuf);
}
-void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, cdf_nbuf_t msdu)
+void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu)
{
return htt_rx_desc(msdu);
}
@@ -1834,12 +1834,12 @@
return true;
}
-void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu)
+void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
{
- cdf_nbuf_free(msdu);
+ qdf_nbuf_free(msdu);
}
-void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu)
+void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
{
/*
* The rx descriptor is in the same buffer as the rx MSDU payload,
@@ -1944,7 +1944,7 @@
Returns 0 - success, 1 - failure */
int
htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
- cdf_nbuf_t netbuf)
+ qdf_nbuf_t netbuf)
{
int i;
struct htt_rx_hash_entry *hash_element = NULL;
@@ -1994,11 +1994,11 @@
/* Given a physical address this function will find the corresponding network
buffer from the hash table.
Note: this function is not thread-safe */
-cdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr)
+qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr)
{
uint32_t i;
struct htt_list_node *list_iter = NULL;
- cdf_nbuf_t netbuf = NULL;
+ qdf_nbuf_t netbuf = NULL;
struct htt_rx_hash_entry *hash_entry;
i = RX_HASH_FUNCTION(paddr);
@@ -2162,7 +2162,7 @@
*pdev->rx_ring.target_idx.vaddr = 0;
} else {
pdev->rx_ring.buf.netbufs_ring =
- qdf_mem_malloc(pdev->rx_ring.size * sizeof(cdf_nbuf_t));
+ qdf_mem_malloc(pdev->rx_ring.size * sizeof(qdf_nbuf_t));
if (!pdev->rx_ring.buf.netbufs_ring)
goto fail1;
diff --git a/core/dp/htt/htt_t2h.c b/core/dp/htt/htt_t2h.c
index 6cf12e9..efefeb4 100644
--- a/core/dp/htt/htt_t2h.c
+++ b/core/dp/htt/htt_t2h.c
@@ -38,7 +38,7 @@
#include <wma.h>
#include <htc_api.h> /* HTC_PACKET */
#include <htt.h> /* HTT_T2H_MSG_TYPE, etc. */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_htt_rx_api.h>
#include <ol_htt_tx_api.h>
@@ -86,16 +86,16 @@
#endif
}
-static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, cdf_nbuf_t msg)
+static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
{
uint32_t *msg_word;
unsigned num_msdu_bytes;
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
struct htt_host_rx_desc_base *rx_desc;
int start_idx;
uint8_t *p_fw_msdu_rx_desc = 0;
- msg_word = (uint32_t *) cdf_nbuf_data(msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(msg);
num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
/*
@@ -123,22 +123,22 @@
*/
start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
- cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
- cdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
+ qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
+ qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
rx_desc = htt_rx_desc(msdu);
*((uint8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
rx_desc->msdu_end.last_msdu = 1;
- cdf_nbuf_map(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
+ qdf_nbuf_map(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
}
/* Target to host Msg/event handler for low priority messages*/
-void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
+void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg)
{
struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
uint32_t *msg_word;
enum htt_t2h_msg_type msg_type;
- msg_word = (uint32_t *) cdf_nbuf_data(htt_t2h_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
switch (msg_type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF:
@@ -473,7 +473,7 @@
break;
};
/* Free the indication buffer */
- cdf_nbuf_free(htt_t2h_msg);
+ qdf_nbuf_free(htt_t2h_msg);
}
/* Generic Target to host Msg/event handler for low priority messages
@@ -484,7 +484,7 @@
void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
- cdf_nbuf_t htt_t2h_msg = (cdf_nbuf_t) pkt->pPktContext;
+ qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
uint32_t *msg_word;
enum htt_t2h_msg_type msg_type;
@@ -492,21 +492,21 @@
if (pkt->Status != A_OK) {
if (pkt->Status != A_ECANCELED)
pdev->stats.htc_err_cnt++;
- cdf_nbuf_free(htt_t2h_msg);
+ qdf_nbuf_free(htt_t2h_msg);
return;
}
#ifdef HTT_RX_RESTORE
if (qdf_unlikely(pdev->rx_ring.rx_reset)) {
qdf_print("rx restore ..\n");
- cdf_nbuf_free(htt_t2h_msg);
+ qdf_nbuf_free(htt_t2h_msg);
return;
}
#endif
/* confirm alignment */
- HTT_ASSERT3((((unsigned long)cdf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);
+ HTT_ASSERT3((((unsigned long)qdf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);
- msg_word = (uint32_t *) cdf_nbuf_data(htt_t2h_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
#if defined(HELIUMPLUS_DEBUG)
@@ -686,7 +686,7 @@
};
/* Free the indication buffer */
- cdf_nbuf_free(htt_t2h_msg);
+ qdf_nbuf_free(htt_t2h_msg);
}
/*--- target->host HTT message Info Element access methods ------------------*/
@@ -717,43 +717,43 @@
/*--- rx indication message ---*/
-int htt_rx_ind_flush(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+int htt_rx_ind_flush(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
{
uint32_t *msg_word;
- msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
return HTT_RX_IND_FLUSH_VALID_GET(*msg_word);
}
void
htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
unsigned *seq_num_start, unsigned *seq_num_end)
{
uint32_t *msg_word;
- msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
msg_word++;
*seq_num_start = HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
*seq_num_end = HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
}
-int htt_rx_ind_release(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+int htt_rx_ind_release(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
{
uint32_t *msg_word;
- msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
return HTT_RX_IND_REL_VALID_GET(*msg_word);
}
void
htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
unsigned *seq_num_start, unsigned *seq_num_end)
{
uint32_t *msg_word;
- msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
msg_word++;
*seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(*msg_word);
*seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(*msg_word);
@@ -761,13 +761,13 @@
void
htt_rx_ind_mpdu_range_info(struct htt_pdev_t *pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
int mpdu_range_num,
enum htt_rx_status *status, int *mpdu_count)
{
uint32_t *msg_word;
- msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
msg_word += pdev->rx_mpdu_range_offset_words + mpdu_range_num;
*status = HTT_RX_IND_MPDU_STATUS_GET(*msg_word);
*mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(*msg_word);
@@ -783,13 +783,13 @@
*
* Return: RSSI in dBm, or HTT_INVALID_RSSI
*/
-int16_t htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+int16_t htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
{
int8_t rssi;
uint32_t *msg_word;
msg_word = (uint32_t *)
- (cdf_nbuf_data(rx_ind_msg) +
+ (qdf_nbuf_data(rx_ind_msg) +
HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
/* check if the RX_IND message contains valid rx PPDU start info */
@@ -813,7 +813,7 @@
* Return: RSSI, or HTT_INVALID_RSSI
*/
int16_t
-htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
int8_t chain)
{
int8_t rssi;
@@ -823,7 +823,7 @@
return HTT_RSSI_INVALID;
msg_word = (uint32_t *)
- (cdf_nbuf_data(rx_ind_msg) +
+ (qdf_nbuf_data(rx_ind_msg) +
HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
/* check if the RX_IND message contains valid rx PPDU start info */
@@ -869,13 +869,13 @@
* Return the data rate provided in a rx indication message.
*/
void
-htt_rx_ind_legacy_rate(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+htt_rx_ind_legacy_rate(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
uint8_t *legacy_rate, uint8_t *legacy_rate_sel)
{
uint32_t *msg_word;
msg_word = (uint32_t *)
- (cdf_nbuf_data(rx_ind_msg) +
+ (qdf_nbuf_data(rx_ind_msg) +
HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
/* check if the RX_IND message contains valid rx PPDU start info */
@@ -901,14 +901,14 @@
* Return the timestamp provided in a rx indication message.
*/
void
-htt_rx_ind_timestamp(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+htt_rx_ind_timestamp(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
uint32_t *timestamp_microsec,
uint8_t *timestamp_submicrosec)
{
uint32_t *msg_word;
msg_word = (uint32_t *)
- (cdf_nbuf_data(rx_ind_msg) +
+ (qdf_nbuf_data(rx_ind_msg) +
HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
/* check if the RX_IND message contains valid rx PPDU start info */
@@ -934,12 +934,12 @@
* Return: TSF timestamp
*/
uint32_t
-htt_rx_ind_tsf32(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+htt_rx_ind_tsf32(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
{
uint32_t *msg_word;
msg_word = (uint32_t *)
- (cdf_nbuf_data(rx_ind_msg) +
+ (qdf_nbuf_data(rx_ind_msg) +
HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
/* check if the RX_IND message contains valid rx PPDU start info */
@@ -959,12 +959,12 @@
* Return: Extended TID
*/
uint8_t
-htt_rx_ind_ext_tid(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+htt_rx_ind_ext_tid(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
{
uint32_t *msg_word;
msg_word = (uint32_t *)
- (cdf_nbuf_data(rx_ind_msg));
+ (qdf_nbuf_data(rx_ind_msg));
return HTT_RX_IND_EXT_TID_GET(*msg_word);
}
@@ -987,12 +987,12 @@
void
htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
- cdf_nbuf_t rx_frag_ind_msg,
+ qdf_nbuf_t rx_frag_ind_msg,
int *seq_num_start, int *seq_num_end)
{
uint32_t *msg_word;
- msg_word = (uint32_t *) cdf_nbuf_data(rx_frag_ind_msg);
+ msg_word = (uint32_t *) qdf_nbuf_data(rx_frag_ind_msg);
msg_word++;
*seq_num_start = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
*seq_num_end = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
diff --git a/core/dp/htt/htt_tx.c b/core/dp/htt/htt_tx.c
index d53caf9..757ba44 100644
--- a/core/dp/htt/htt_tx.c
+++ b/core/dp/htt/htt_tx.c
@@ -39,7 +39,7 @@
#include <osdep.h> /* uint32_t, offsetof, etc. */
#include <qdf_types.h> /* qdf_dma_addr_t */
#include <qdf_mem.h> /* qdf_mem_alloc_consistent et al */
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <qdf_time.h> /* qdf_mdelay */
#include <htt.h> /* htt_tx_msdu_desc_t */
@@ -548,7 +548,7 @@
because of No CE desc*/
void htt_tx_sched(htt_pdev_handle pdev)
{
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
int download_len = pdev->download_len;
int packet_len;
@@ -556,7 +556,7 @@
while (msdu != NULL) {
int not_accepted;
/* packet length includes HTT tx desc frag added above */
- packet_len = cdf_nbuf_len(msdu);
+ packet_len = qdf_nbuf_len(msdu);
if (packet_len < download_len) {
/*
* This case of packet length being less than the
@@ -583,7 +583,7 @@
}
}
-int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
+int htt_tx_send_std(htt_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t msdu_id)
{
int download_len = pdev->download_len;
@@ -591,7 +591,7 @@
int packet_len;
/* packet length includes HTT tx desc frag added above */
- packet_len = cdf_nbuf_len(msdu);
+ packet_len = qdf_nbuf_len(msdu);
if (packet_len < download_len) {
/*
* This case of packet length being less than the nominal
@@ -605,17 +605,17 @@
download_len = packet_len;
}
- NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_HTT);
DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
- (uint8_t *)(cdf_nbuf_data(msdu)),
- sizeof(cdf_nbuf_data(msdu))));
- if (cdf_nbuf_queue_len(&pdev->txnbufq) > 0) {
+ (uint8_t *)(qdf_nbuf_data(msdu)),
+ sizeof(qdf_nbuf_data(msdu))));
+ if (qdf_nbuf_queue_len(&pdev->txnbufq) > 0) {
HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
htt_tx_sched(pdev);
return 0;
}
- cdf_nbuf_trace_update(msdu, "HT:T:");
+ qdf_nbuf_trace_update(msdu, "HT:T:");
if (htc_send_data_pkt
(pdev->htc_pdev, msdu, pdev->htc_endpoint, download_len)) {
HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
@@ -643,8 +643,8 @@
htt_tx_resume_handler(void *context) { }
#endif
-cdf_nbuf_t
-htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
+qdf_nbuf_t
+htt_tx_send_batch(htt_pdev_handle pdev, qdf_nbuf_t head_msdu, int num_msdus)
{
qdf_print("*** %s curently only applies for HL systems\n", __func__);
qdf_assert(0);
@@ -654,7 +654,7 @@
int
htt_tx_send_nonstd(htt_pdev_handle pdev,
- cdf_nbuf_t msdu,
+ qdf_nbuf_t msdu,
uint16_t msdu_id, enum htt_pkt_type pkt_type)
{
int download_len;
@@ -678,15 +678,15 @@
#ifdef QCA_TX_HTT2_SUPPORT
static inline HTC_ENDPOINT_ID
-htt_tx_htt2_get_ep_id(htt_pdev_handle pdev, cdf_nbuf_t msdu)
+htt_tx_htt2_get_ep_id(htt_pdev_handle pdev, qdf_nbuf_t msdu)
{
/*
* TX HTT2 service mainly for small sized frame and check if
* this candidate frame allow or not.
*/
if ((pdev->htc_tx_htt2_endpoint != ENDPOINT_UNUSED) &&
- cdf_nbuf_get_tx_parallel_dnload_frm(msdu) &&
- (cdf_nbuf_len(msdu) < pdev->htc_tx_htt2_max_size))
+ qdf_nbuf_get_tx_parallel_dnload_frm(msdu) &&
+ (qdf_nbuf_len(msdu) < pdev->htc_tx_htt2_max_size))
return pdev->htc_tx_htt2_endpoint;
else
return pdev->htc_endpoint;
@@ -697,7 +697,7 @@
static inline int
htt_tx_send_base(htt_pdev_handle pdev,
- cdf_nbuf_t msdu,
+ qdf_nbuf_t msdu,
uint16_t msdu_id, int download_len, uint8_t more_data)
{
struct htt_host_tx_desc_t *htt_host_tx_desc;
@@ -711,7 +711,7 @@
* Retrieve it so we can provide its HTC header space to HTC.
*/
htt_host_tx_desc = (struct htt_host_tx_desc_t *)
- cdf_nbuf_get_frag_vaddr(msdu, 0);
+ qdf_nbuf_get_frag_vaddr(msdu, 0);
pkt = htt_htc_pkt_alloc(pdev);
if (!pkt)
@@ -721,7 +721,7 @@
pkt->pdev_ctxt = pdev->txrx_pdev;
/* packet length includes HTT tx desc frag added above */
- packet_len = cdf_nbuf_len(msdu);
+ packet_len = qdf_nbuf_len(msdu);
if (packet_len < download_len) {
/*
* This case of packet length being less than the nominal
@@ -746,23 +746,23 @@
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msdu);
- cdf_nbuf_trace_update(msdu, "HT:T:");
- NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
+ qdf_nbuf_trace_update(msdu, "HT:T:");
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_HTT);
DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
- (uint8_t *)(cdf_nbuf_data(msdu)),
- sizeof(cdf_nbuf_data(msdu))));
+ (uint8_t *)(qdf_nbuf_data(msdu)),
+ sizeof(qdf_nbuf_data(msdu))));
htc_send_data_pkt(pdev->htc_pdev, &pkt->htc_pkt, more_data);
return 0; /* success */
}
-cdf_nbuf_t
-htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
+qdf_nbuf_t
+htt_tx_send_batch(htt_pdev_handle pdev, qdf_nbuf_t head_msdu, int num_msdus)
{
- cdf_nbuf_t rejected = NULL;
+ qdf_nbuf_t rejected = NULL;
uint16_t *msdu_id_storage;
uint16_t msdu_id;
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
/*
* FOR NOW, iterate through the batch, sending the frames singly.
* Eventually HTC and HIF should be able to accept a batch of
@@ -770,14 +770,14 @@
*/
msdu = head_msdu;
while (num_msdus--) {
- cdf_nbuf_t next_msdu = cdf_nbuf_next(msdu);
+ qdf_nbuf_t next_msdu = qdf_nbuf_next(msdu);
msdu_id_storage = ol_tx_msdu_id_storage(msdu);
msdu_id = *msdu_id_storage;
/* htt_tx_send_base returns 0 as success and 1 as failure */
if (htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len,
num_msdus)) {
- cdf_nbuf_set_next(msdu, rejected);
+ qdf_nbuf_set_next(msdu, rejected);
rejected = msdu;
}
msdu = next_msdu;
@@ -787,7 +787,7 @@
int
htt_tx_send_nonstd(htt_pdev_handle pdev,
- cdf_nbuf_t msdu,
+ qdf_nbuf_t msdu,
uint16_t msdu_id, enum htt_pkt_type pkt_type)
{
int download_len;
@@ -806,7 +806,7 @@
return htt_tx_send_base(pdev, msdu, msdu_id, download_len, 0);
}
-int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
+int htt_tx_send_std(htt_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t msdu_id)
{
return htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len, 0);
}
@@ -865,7 +865,7 @@
unsigned int uc_tx_partition_base)
{
unsigned int tx_buffer_count;
- cdf_nbuf_t buffer_vaddr;
+ qdf_nbuf_t buffer_vaddr;
qdf_dma_addr_t buffer_paddr;
uint32_t *header_ptr;
uint32_t *ring_vaddr;
@@ -876,7 +876,7 @@
/* Allocate TX buffers as many as possible */
for (tx_buffer_count = 0;
tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
- buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
+ buffer_vaddr = qdf_nbuf_alloc(pdev->osdev,
uc_tx_buf_sz, 0, 4, false);
if (!buffer_vaddr) {
qdf_print("%s: TX BUF alloc fail, loop index: %d",
@@ -885,8 +885,8 @@
}
/* Init buffer */
- qdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
- header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
+ qdf_mem_zero(qdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
+ header_ptr = (uint32_t *) qdf_nbuf_data(buffer_vaddr);
/* HTT control header */
*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
@@ -896,8 +896,8 @@
*header_ptr |= ((uint16_t) uc_tx_partition_base +
tx_buffer_count) << 16;
- cdf_nbuf_map(pdev->osdev, buffer_vaddr, QDF_DMA_BIDIRECTIONAL);
- buffer_paddr = cdf_nbuf_get_frag_paddr(buffer_vaddr, 0);
+ qdf_nbuf_map(pdev->osdev, buffer_vaddr, QDF_DMA_BIDIRECTIONAL);
+ buffer_paddr = qdf_nbuf_get_frag_paddr(buffer_vaddr, 0);
header_ptr++;
*header_ptr = (uint32_t) (buffer_paddr +
IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
@@ -924,7 +924,7 @@
unsigned int uc_tx_partition_base)
{
unsigned int tx_buffer_count;
- cdf_nbuf_t buffer_vaddr;
+ qdf_nbuf_t buffer_vaddr;
qdf_dma_addr_t buffer_paddr;
uint32_t *header_ptr;
uint32_t *ring_vaddr;
@@ -936,7 +936,7 @@
/* Allocate TX buffers as many as possible */
for (tx_buffer_count = 0;
tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
- buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
+ buffer_vaddr = qdf_nbuf_alloc(pdev->osdev,
uc_tx_buf_sz, 0, 4, false);
if (!buffer_vaddr) {
qdf_print("%s: TX BUF alloc fail, loop index: %d",
@@ -945,8 +945,8 @@
}
/* Init buffer */
- qdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
- header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
+ qdf_mem_zero(qdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
+ header_ptr = (uint32_t *) qdf_nbuf_data(buffer_vaddr);
/* HTT control header */
*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
@@ -956,8 +956,8 @@
*header_ptr |= ((uint16_t) uc_tx_partition_base +
tx_buffer_count) << 16;
- cdf_nbuf_map(pdev->osdev, buffer_vaddr, QDF_DMA_BIDIRECTIONAL);
- buffer_paddr = cdf_nbuf_get_frag_paddr(buffer_vaddr, 0);
+ qdf_nbuf_map(pdev->osdev, buffer_vaddr, QDF_DMA_BIDIRECTIONAL);
+ buffer_paddr = qdf_nbuf_get_frag_paddr(buffer_vaddr, 0);
header_ptr++;
/* Frag Desc Pointer */
@@ -1019,7 +1019,7 @@
}
/* Allocate TX COMP Ring */
- tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
+ tx_comp_ring_size = uc_tx_buf_cnt * sizeof(qdf_nbuf_t);
pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
qdf_mem_alloc_consistent(
pdev->osdev, pdev->osdev->dev,
@@ -1035,15 +1035,15 @@
/* Allocate TX BUF vAddress Storage */
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
- (cdf_nbuf_t *) qdf_mem_malloc(uc_tx_buf_cnt *
- sizeof(cdf_nbuf_t));
+ (qdf_nbuf_t *) qdf_mem_malloc(uc_tx_buf_cnt *
+ sizeof(qdf_nbuf_t));
if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
qdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
return_code = -ENOBUFS;
goto free_tx_comp_base;
}
qdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
- uc_tx_buf_cnt * sizeof(cdf_nbuf_t));
+ uc_tx_buf_cnt * sizeof(qdf_nbuf_t));
pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = htt_tx_ipa_uc_wdi_tx_buf_alloc(
pdev, uc_tx_buf_sz, uc_tx_buf_cnt, uc_tx_partition_base);
@@ -1090,7 +1090,8 @@
if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
qdf_mem_free_consistent(
pdev->osdev,
- ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * sizeof(cdf_nbuf_t),
+ ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) *
+ sizeof(qdf_nbuf_t),
pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
qdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
@@ -1101,11 +1102,11 @@
/* Free each single buffer */
for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
- cdf_nbuf_unmap(pdev->osdev,
+ qdf_nbuf_unmap(pdev->osdev,
pdev->ipa_uc_tx_rsc.
tx_buf_pool_vaddr_strg[idx],
QDF_DMA_FROM_DEVICE);
- cdf_nbuf_free(pdev->ipa_uc_tx_rsc.
+ qdf_nbuf_free(pdev->ipa_uc_tx_rsc.
tx_buf_pool_vaddr_strg[idx]);
}
}
diff --git a/core/dp/htt/htt_types.h b/core/dp/htt/htt_types.h
index 671200c..0be83c8 100644
--- a/core/dp/htt/htt_types.h
+++ b/core/dp/htt/htt_types.h
@@ -33,7 +33,7 @@
#include <qdf_lock.h> /* qdf_spinlock_t */
#include <qdf_timer.h> /* qdf_timer_t */
#include <qdf_atomic.h> /* qdf_atomic_inc */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <htc_api.h> /* HTC_PACKET */
#include <ol_ctrl_api.h> /* ol_pdev_handle */
@@ -83,9 +83,9 @@
};
struct htt_tx_mgmt_desc_buf {
- cdf_nbuf_t msg_buf;
+ qdf_nbuf_t msg_buf;
A_BOOL is_inuse;
- cdf_nbuf_t mgmt_frm;
+ qdf_nbuf_t mgmt_frm;
};
struct htt_tx_mgmt_desc_ctxt {
@@ -100,7 +100,7 @@
struct htt_rx_hash_entry {
A_UINT32 paddr;
- cdf_nbuf_t netbuf;
+ qdf_nbuf_t netbuf;
A_UINT8 fromlist;
struct htt_list_node listnode;
#ifdef RX_HASH_DEBUG
@@ -133,7 +133,7 @@
struct uc_shared_mem_t tx_comp_base;
uint32_t tx_comp_idx_paddr;
- cdf_nbuf_t *tx_buf_pool_vaddr_strg;
+ qdf_nbuf_t *tx_buf_pool_vaddr_strg;
uint32_t alloc_tx_buf_cnt;
};
@@ -229,7 +229,7 @@
#ifdef ATH_11AC_TXCOMPACT
HTT_TX_MUTEX_TYPE txnbufq_mutex;
- cdf_nbuf_queue_t txnbufq;
+ qdf_nbuf_queue_t txnbufq;
struct htt_htc_pkt_union *htt_htc_pkt_misclist;
#endif
@@ -259,7 +259,7 @@
* The host SW uses this netbufs ring to locate the nw
* buffer objects whose data buffers the HW has filled.
*/
- cdf_nbuf_t *netbufs_ring;
+ qdf_nbuf_t *netbufs_ring;
/*
* Ring of buffer addresses -
* This ring holds the "physical" device address of the
@@ -370,7 +370,7 @@
int download_len;
void (*tx_send_complete_part2)(void *pdev, A_STATUS status,
- cdf_nbuf_t msdu, uint16_t msdu_id);
+ qdf_nbuf_t msdu, uint16_t msdu_id);
HTT_TX_MUTEX_TYPE htt_tx_mutex;
diff --git a/core/dp/ol/inc/ol_htt_api.h b/core/dp/ol/inc/ol_htt_api.h
index 0cf49b5..e0a7cd8 100644
--- a/core/dp/ol/inc/ol_htt_api.h
+++ b/core/dp/ol/inc/ol_htt_api.h
@@ -36,7 +36,7 @@
#define _OL_HTT_API__H_
#include <qdf_types.h> /* qdf_device_t */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <athdefs.h> /* A_STATUS */
#include <htc_api.h> /* HTC_HANDLE */
#include <ol_ctrl_api.h> /* ol_pdev_handle */
diff --git a/core/dp/ol/inc/ol_htt_rx_api.h b/core/dp/ol/inc/ol_htt_rx_api.h
index 230d87b..942b4cb 100644
--- a/core/dp/ol/inc/ol_htt_rx_api.h
+++ b/core/dp/ol/inc/ol_htt_rx_api.h
@@ -40,7 +40,7 @@
/* #include <osapi_linux.h> / * uint16_t, etc. * / */
#include <osdep.h> /* uint16_t, etc. */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_types.h> /* bool */
#include <htt.h> /* HTT_RX_IND_MPDU_STATUS */
@@ -109,7 +109,7 @@
* -OR-
* 0 - the message's rx flush command is invalid and should be ignored
*/
-int htt_rx_ind_flush(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+int htt_rx_ind_flush(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg);
/**
* @brief Return the sequence number starting the range of MPDUs to flush.
@@ -138,7 +138,7 @@
*/
void
htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
unsigned *seq_num_start, unsigned *seq_num_end);
/**
@@ -160,7 +160,7 @@
* -OR-
* 0 - the message's rx release command is invalid and should be ignored
*/
-int htt_rx_ind_release(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+int htt_rx_ind_release(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg);
/**
* @brief Return the sequence number starting the range of MPDUs to release.
@@ -189,7 +189,7 @@
*/
void
htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
unsigned *seq_num_start,
unsigned *seq_num_end);
@@ -234,7 +234,7 @@
*/
void
htt_rx_ind_mpdu_range_info(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
int mpdu_range_num,
enum htt_rx_status *status, int *mpdu_count);
@@ -248,27 +248,27 @@
* @return RSSI in dBm, or HTT_INVALID_RSSI
*/
int16_t
-htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg);
int16_t
-htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
int8_t chain);
void
-htt_rx_ind_legacy_rate(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+htt_rx_ind_legacy_rate(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
uint8_t *legacy_rate, uint8_t *legacy_rate_sel);
void
-htt_rx_ind_timestamp(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+htt_rx_ind_timestamp(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
uint32_t *timestamp_microsec,
uint8_t *timestamp_submicrosec);
uint32_t
-htt_rx_ind_tsf32(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+htt_rx_ind_tsf32(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg);
uint8_t
-htt_rx_ind_ext_tid(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+htt_rx_ind_ext_tid(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg);
/*==================== rx MPDU descriptor access methods ====================*/
@@ -654,13 +654,13 @@
*/
extern int
(*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
- cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+ qdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu);
extern int
(*htt_rx_frag_pop)(htt_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
- cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+ qdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu);
/**
* @brief Return a linked list of buffers holding one MSDU
@@ -685,12 +685,12 @@
*/
extern int
(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
- cdf_nbuf_t offload_deliver_msg,
+ qdf_nbuf_t offload_deliver_msg,
int *vdev_id,
int *peer_id,
int *tid,
uint8_t *fw_desc,
- cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+ qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf);
/**
* @brief Return the rx descriptor for the next rx MPDU.
@@ -713,7 +713,7 @@
* by an rx ind msg
*/
extern void *
-(*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+(*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg);
/**
* @brief Retrieve a previously-stored rx descriptor from a MSDU buffer.
@@ -727,7 +727,7 @@
* @return the corresponding abstract rx MSDU descriptor
*/
extern void *
-(*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, cdf_nbuf_t msdu);
+(*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, qdf_nbuf_t msdu);
/**
* @brief Free both an rx MSDU descriptor and the associated MSDU buffer.
@@ -749,7 +749,7 @@
* @param rx_msdu_desc - rx descriptor for the MSDU being freed
* @param msdu - rx frame buffer for the MSDU being freed
*/
-void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu);
+void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu);
/**
* @brief Look up and free the rx descriptor for a MSDU.
@@ -766,7 +766,7 @@
* @param htt_pdev - the HTT instance the rx data was received on
* @param msdu - rx frame buffer for the rx MSDU descriptor being freed
*/
-void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu);
+void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu);
/**
* @brief Add new MSDU buffers for the target to fill.
@@ -805,9 +805,9 @@
* list, else operates on a cloned nbuf
* @return network buffer handle to the MPDU
*/
-cdf_nbuf_t
+qdf_nbuf_t
htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
- cdf_nbuf_t head_msdu,
+ qdf_nbuf_t head_msdu,
struct ieee80211_rx_status *rx_status,
unsigned clone_not_reqd);
@@ -822,7 +822,7 @@
*/
void
htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
- cdf_nbuf_t rx_frag_ind_msg,
+ qdf_nbuf_t rx_frag_ind_msg,
int *seq_num_start, int *seq_num_end);
/**
* @brief Return the HL rx desc size
@@ -837,7 +837,7 @@
* @param msdu - network buffer handle
* @param vowstats - handle to vow ext stats.
*/
-void htt_rx_get_vowext_stats(cdf_nbuf_t msdu, struct vow_extstats *vowstats);
+void htt_rx_get_vowext_stats(qdf_nbuf_t msdu, struct vow_extstats *vowstats);
/**
* @brief parses the offload message passed by the target.
@@ -859,5 +859,5 @@
int *peer_id,
int *tid,
uint8_t *fw_desc,
- cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+ qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf);
#endif /* _OL_HTT_RX_API__H_ */
diff --git a/core/dp/ol/inc/ol_htt_tx_api.h b/core/dp/ol/inc/ol_htt_tx_api.h
index e2497f3..a6b785c 100644
--- a/core/dp/ol/inc/ol_htt_tx_api.h
+++ b/core/dp/ol/inc/ol_htt_tx_api.h
@@ -39,7 +39,7 @@
/* #include <osapi_linux.h> / * uint16_t, etc. * / */
#include <osdep.h> /* uint16_t, etc. */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_cfg.h> /* wlan_frm_fmt */
#include <htt.h> /* needed by inline functions */
@@ -256,13 +256,13 @@
uint8_t *dest_addr;
*/
- uint8_t l3_hdr_offset; /* wrt cdf_nbuf_data(msdu), in bytes */
+ uint8_t l3_hdr_offset; /* wrt qdf_nbuf_data(msdu), in bytes */
/* l4_hdr_offset is not currently used.
* It could be used to specify to a TCP/UDP checksum computation
* engine where the TCP/UDP header starts.
*/
- /* uint8_t l4_hdr_offset; - wrt cdf_nbuf_data(msdu), in bytes */
+ /* uint8_t l4_hdr_offset; - wrt qdf_nbuf_data(msdu), in bytes */
} info;
/* the action sub-struct specifies how to process the MSDU */
struct {
@@ -419,7 +419,7 @@
* function assumes the tx frame is the default frame type, as specified
* by ol_cfg_frame_type. "Raw" frames need to be transmitted through the
* alternate htt_tx_send_nonstd function.
- * The tx descriptor has already been attached to the cdf_nbuf object during
+ * The tx descriptor has already been attached to the qdf_nbuf object during
* a preceding call to htt_tx_desc_init.
*
* @param htt_pdev - the handle of the physical device sending the tx data
@@ -428,7 +428,7 @@
* @return 0 -> success, -OR- 1 -> failure
*/
int
-htt_tx_send_std(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu, uint16_t msdu_id);
+htt_tx_send_std(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu, uint16_t msdu_id);
/**
* @brief Download a Batch Of Tx MSDUs
@@ -442,9 +442,9 @@
* @param num_msdus - The total Number of MSDU's provided for batch tx
* @return null-terminated linked-list of unaccepted frames
*/
-cdf_nbuf_t
+qdf_nbuf_t
htt_tx_send_batch(htt_pdev_handle htt_pdev,
- cdf_nbuf_t head_msdu, int num_msdus);
+ qdf_nbuf_t head_msdu, int num_msdus);
/* The htt scheduler for queued packets in htt
* htt when unable to send to HTC because of lack of resource
@@ -459,7 +459,7 @@
*/
int
htt_tx_send_nonstd(htt_pdev_handle htt_pdev,
- cdf_nbuf_t msdu,
+ qdf_nbuf_t msdu,
uint16_t msdu_id, enum htt_pkt_type pkt_type);
/**
@@ -532,7 +532,7 @@
void *htt_tx_desc,
qdf_dma_addr_t htt_tx_desc_paddr,
uint16_t msdu_id,
- cdf_nbuf_t msdu, struct htt_msdu_info_t *msdu_info,
+ qdf_nbuf_t msdu, struct htt_msdu_info_t *msdu_info,
struct qdf_tso_info_t *tso_info,
struct ocb_tx_ctrl_hdr_t *tx_ctrl,
uint8_t is_dsrc)
@@ -568,7 +568,7 @@
if (qdf_likely(pdev->cfg.ce_classify_enabled)) {
if (qdf_likely(pkt_type == htt_pkt_type_eth2 ||
pkt_type == htt_pkt_type_ethernet))
- cdf_nbuf_tx_info_get(msdu, pkt_type, pkt_subtype,
+ qdf_nbuf_tx_info_get(msdu, pkt_type, pkt_subtype,
hw_classify);
ce_pkt_type = htt_to_ce_pkt_type[pkt_type];
@@ -611,7 +611,7 @@
if (tso_info->is_tso)
HTT_TX_DESC_FRM_LEN_SET(local_word1, tso_info->total_len);
else
- HTT_TX_DESC_FRM_LEN_SET(local_word1, cdf_nbuf_len(msdu));
+ HTT_TX_DESC_FRM_LEN_SET(local_word1, qdf_nbuf_len(msdu));
HTT_TX_DESC_FRM_ID_SET(local_word1, msdu_id);
*word1 = local_word1;
@@ -659,8 +659,8 @@
local_desc_ext.is_dsrc = (is_dsrc != 0);
- cdf_nbuf_push_head(msdu, sizeof(local_desc_ext));
- qdf_mem_copy(cdf_nbuf_data(msdu), &local_desc_ext,
+ qdf_nbuf_push_head(msdu, sizeof(local_desc_ext));
+ qdf_mem_copy(qdf_nbuf_data(msdu), &local_desc_ext,
sizeof(local_desc_ext));
}
@@ -675,11 +675,11 @@
* Setting the flag for this final fragment suffices for specifying
* all fragments provided by the OS rather than added by the driver.
*/
- cdf_nbuf_set_frag_is_wordstream(msdu, cdf_nbuf_get_num_frags(msdu) - 1,
+ qdf_nbuf_set_frag_is_wordstream(msdu, qdf_nbuf_get_num_frags(msdu) - 1,
0);
/* store a link to the HTT tx descriptor within the netbuf */
- cdf_nbuf_frag_push_head(msdu, sizeof(struct htt_host_tx_desc_t),
+ qdf_nbuf_frag_push_head(msdu, sizeof(struct htt_host_tx_desc_t),
(char *)htt_host_tx_desc, /* virtual addr */
htt_tx_desc_paddr);
@@ -691,17 +691,17 @@
* the host is big-endian, to convert to the target's little-endian
* format.
*/
- cdf_nbuf_set_frag_is_wordstream(msdu, 0, 1);
+ qdf_nbuf_set_frag_is_wordstream(msdu, 0, 1);
if (qdf_likely(pdev->cfg.ce_classify_enabled &&
(msdu_info->info.l2_hdr_type != htt_pkt_type_mgmt))) {
- uint32_t pkt_offset = cdf_nbuf_get_frag_len(msdu, 0);
+ uint32_t pkt_offset = qdf_nbuf_get_frag_len(msdu, 0);
data_attr = hw_classify << QDF_CE_TX_CLASSIFY_BIT_S;
data_attr |= ce_pkt_type << QDF_CE_TX_PKT_TYPE_BIT_S;
data_attr |= pkt_offset << QDF_CE_TX_PKT_OFFSET_BIT_S;
}
- cdf_nbuf_data_attr_set(msdu, data_attr);
+ qdf_nbuf_data_attr_set(msdu, data_attr);
}
/**
@@ -909,9 +909,9 @@
* @param - pointer to the mamangement from UMAC
* @return - pointer the allocated mgmt descriptor
*/
-cdf_nbuf_t
+qdf_nbuf_t
htt_tx_mgmt_desc_alloc(struct htt_pdev_t *pdev, A_UINT32 *desc_id,
- cdf_nbuf_t mgmt_frm);
+ qdf_nbuf_t mgmt_frm);
/** htt_tx_mgmt_desc_free
* @description - releases the management descriptor back to the pool
diff --git a/core/dp/ol/inc/ol_txrx_ctrl_api.h b/core/dp/ol/inc/ol_txrx_ctrl_api.h
index b08c5dd..79b830e 100644
--- a/core/dp/ol/inc/ol_txrx_ctrl_api.h
+++ b/core/dp/ol/inc/ol_txrx_ctrl_api.h
@@ -33,7 +33,7 @@
#define _OL_TXRX_CTRL_API__H_
#include <athdefs.h> /* A_STATUS */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_types.h> /* qdf_device_t */
#include <htc_api.h> /* HTC_HANDLE */
@@ -543,7 +543,7 @@
void ol_txrx_pdev_detach(ol_txrx_pdev_handle data_pdev, int force);
typedef void
-(*ol_txrx_data_tx_cb)(void *ctxt, cdf_nbuf_t tx_frm, int had_error);
+(*ol_txrx_data_tx_cb)(void *ctxt, qdf_nbuf_t tx_frm, int had_error);
/**
* @brief Store a delivery notification callback for specific data frames.
@@ -583,12 +583,12 @@
* @param tx_spec - what non-standard handling to apply to the tx data frames
* @param msdu_list - NULL-terminated list of tx MSDUs
*/
-cdf_nbuf_t
+qdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle data_vdev,
- enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list);
+ enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
typedef void
-(*ol_txrx_mgmt_tx_cb)(void *ctxt, cdf_nbuf_t tx_mgmt_frm, int had_error);
+(*ol_txrx_mgmt_tx_cb)(void *ctxt, qdf_nbuf_t tx_mgmt_frm, int had_error);
/**
* @brief Store a callback for delivery notifications for management frames.
@@ -634,7 +634,7 @@
*/
int
ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
- cdf_nbuf_t tx_mgmt_frm,
+ qdf_nbuf_t tx_mgmt_frm,
uint8_t type, uint8_t use_6mbps, uint16_t chanfreq);
/**
diff --git a/core/dp/ol/inc/ol_txrx_htt_api.h b/core/dp/ol/inc/ol_txrx_htt_api.h
index d84b3ee..00d612c 100644
--- a/core/dp/ol/inc/ol_txrx_htt_api.h
+++ b/core/dp/ol/inc/ol_txrx_htt_api.h
@@ -34,14 +34,14 @@
#include <htt.h> /* HTT_TX_COMPL_IND_STAT */
#include <athdefs.h> /* A_STATUS */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_txrx_api.h> /* ol_txrx_pdev_handle */
-static inline uint16_t *ol_tx_msdu_id_storage(cdf_nbuf_t msdu)
+static inline uint16_t *ol_tx_msdu_id_storage(qdf_nbuf_t msdu)
{
- qdf_assert(cdf_nbuf_headroom(msdu) >= (sizeof(uint16_t) * 2 - 1));
- return (uint16_t *) (((qdf_size_t) (cdf_nbuf_head(msdu) + 1)) & ~0x1);
+ qdf_assert(qdf_nbuf_headroom(msdu) >= (sizeof(uint16_t) * 2 - 1));
+ return (uint16_t *) (((qdf_size_t) (qdf_nbuf_head(msdu) + 1)) & ~0x1);
}
/**
@@ -59,7 +59,7 @@
*/
void
ol_tx_download_done_ll(void *pdev,
- A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id);
+ A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id);
/**
* @brief Tx MSDU download completion for HL system without tx completion msgs
@@ -75,7 +75,7 @@
*/
void
ol_tx_download_done_hl_free(void *pdev,
- A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id);
+ A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id);
/**
* @brief Tx MSDU download completion for HL system with tx completion msgs
@@ -96,7 +96,7 @@
void
ol_tx_download_done_hl_retain(void *pdev,
A_STATUS status,
- cdf_nbuf_t msdu, uint16_t msdu_id);
+ qdf_nbuf_t msdu, uint16_t msdu_id);
/*
* For now, make the host HTT -> host txrx tx completion status
@@ -233,7 +233,7 @@
*/
void
ol_rx_indication_handler(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
uint16_t peer_id, uint8_t tid, int num_mpdu_ranges);
/**
@@ -253,7 +253,7 @@
* @param tid - what (extended) traffic type the rx data is
*/
void ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t rx_frag_ind_msg,
+ qdf_nbuf_t rx_frag_ind_msg,
uint16_t peer_id, uint8_t tid);
/**
@@ -277,7 +277,7 @@
*/
void
ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t msg, int msdu_cnt);
+ qdf_nbuf_t msg, int msdu_cnt);
/**
* @brief Process a peer map message sent by the target.
@@ -572,7 +572,7 @@
*/
void
ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
uint16_t peer_id,
uint8_t tid, uint8_t is_offload);
diff --git a/core/dp/ol/inc/ol_txrx_osif_api.h b/core/dp/ol/inc/ol_txrx_osif_api.h
index 81fddfe..dd7acaf 100644
--- a/core/dp/ol/inc/ol_txrx_osif_api.h
+++ b/core/dp/ol/inc/ol_txrx_osif_api.h
@@ -32,7 +32,7 @@
#ifndef _OL_TXRX_OSIF_API__H_
#define _OL_TXRX_OSIF_API__H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_osif_api.h> /* ol_osif_vdev_handle */
#include <ol_txrx_api.h> /* ol_txrx_pdev_handle, etc. */
@@ -46,21 +46,21 @@
*/
struct ol_rx_cached_buf {
struct list_head list;
- cdf_nbuf_t buf;
+ qdf_nbuf_t buf;
};
/**
* @typedef ol_txrx_rx_fp
* @brief receive function to hand batches of data frames from txrx to OS shim
*/
-typedef void (*ol_txrx_rx_fp)(void *osif_dev, cdf_nbuf_t msdus);
+typedef void (*ol_txrx_rx_fp)(void *osif_dev, qdf_nbuf_t msdus);
/**
* @typedef ol_txrx_tx_fp
* @brief top-level transmit function
*/
-typedef cdf_nbuf_t (*ol_txrx_tx_fp)(ol_txrx_vdev_handle data_vdev,
- cdf_nbuf_t msdu_list);
+typedef qdf_nbuf_t (*ol_txrx_tx_fp)(ol_txrx_vdev_handle data_vdev,
+ qdf_nbuf_t msdu_list);
/**
* @typedef ol_txrx_tx_non_std_fp
@@ -82,9 +82,9 @@
* @param tx_spec - what non-standard operations to apply to the tx frame
* @param msdu_list - tx frame(s), in a null-terminated list
*/
-typedef cdf_nbuf_t (*ol_txrx_tx_non_std_fp)(ol_txrx_vdev_handle data_vdev,
+typedef qdf_nbuf_t (*ol_txrx_tx_non_std_fp)(ol_txrx_vdev_handle data_vdev,
enum ol_tx_spec tx_spec,
- cdf_nbuf_t msdu_list);
+ qdf_nbuf_t msdu_list);
struct txrx_rx_metainfo;
@@ -111,7 +111,7 @@
typedef QDF_STATUS (*ol_rx_callback_fp)(void *p_cds_gctx,
- cdf_nbuf_t pDataBuff,
+ qdf_nbuf_t pDataBuff,
uint8_t ucSTAId);
typedef void (*ol_tx_pause_callback_fp)(uint8_t vdev_id,
@@ -250,16 +250,16 @@
* NULL if the segmentation fails, - OR -
* a NULL-terminated list of segment network buffers
*/
-cdf_nbuf_t ol_txrx_osif_tso_segment(ol_txrx_vdev_handle txrx_vdev,
+qdf_nbuf_t ol_txrx_osif_tso_segment(ol_txrx_vdev_handle txrx_vdev,
int max_seg_payload_bytes,
- cdf_nbuf_t jumbo_tcp_frame);
+ qdf_nbuf_t jumbo_tcp_frame);
-cdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, cdf_nbuf_t skb,
+qdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, qdf_nbuf_t skb,
uint8_t proto_type);
#ifdef IPA_OFFLOAD
-cdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
- cdf_nbuf_t skb);
+qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
+ qdf_nbuf_t skb);
#endif
QDF_STATUS ol_txrx_register_peer(ol_rx_callback_fp rxcb,
@@ -272,7 +272,7 @@
bool roam_synch_in_progress);
void ol_rx_data_process(struct ol_txrx_peer_t *peer,
- cdf_nbuf_t rx_buf_list);
+ qdf_nbuf_t rx_buf_list);
void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
bool drop);
diff --git a/core/dp/ol/inc/ol_vowext_dbg_defs.h b/core/dp/ol/inc/ol_vowext_dbg_defs.h
index 3be07ac..4a9d3db 100644
--- a/core/dp/ol/inc/ol_vowext_dbg_defs.h
+++ b/core/dp/ol/inc/ol_vowext_dbg_defs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -61,6 +61,6 @@
* @param msdu - network buffer handle
* @param pdev - handle to htt dev.
*/
-void ol_ath_add_vow_extstats(htt_pdev_handle pdev, cdf_nbuf_t msdu);
+void ol_ath_add_vow_extstats(htt_pdev_handle pdev, qdf_nbuf_t msdu);
#endif /* _VOW_DEFINES__H_ */
diff --git a/core/dp/txrx/ol_ctrl_txrx_api.h b/core/dp/txrx/ol_ctrl_txrx_api.h
index 5a0d50f..d77a576 100644
--- a/core/dp/txrx/ol_ctrl_txrx_api.h
+++ b/core/dp/txrx/ol_ctrl_txrx_api.h
@@ -34,7 +34,7 @@
/* #include <osapi_linux.h> / * uint8_t * / */
#include <osdep.h> /* uint8_t */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_ctrl_api.h> /* ol_vdev_handle */
#include <ol_txrx_api.h> /* ol_txrx_peer_handle, etc. */
@@ -106,7 +106,7 @@
int tid,
uint32_t tsf32,
enum ol_rx_err_type err_type,
- cdf_nbuf_t rx_frame, uint64_t *pn, uint8_t key_id);
+ qdf_nbuf_t rx_frame, uint64_t *pn, uint8_t key_id);
enum ol_rx_notify_type {
OL_RX_NOTIFY_IPV4_IGMP,
@@ -138,7 +138,7 @@
uint8_t *peer_mac_addr,
int tid,
uint32_t tsf32,
- enum ol_rx_notify_type notify_type, cdf_nbuf_t rx_frame);
+ enum ol_rx_notify_type notify_type, qdf_nbuf_t rx_frame);
/**
* @brief Indicate when a paused STA has tx data available.
diff --git a/core/dp/txrx/ol_osif_txrx_api.h b/core/dp/txrx/ol_osif_txrx_api.h
index 2014d4f..979b258 100644
--- a/core/dp/txrx/ol_osif_txrx_api.h
+++ b/core/dp/txrx/ol_osif_txrx_api.h
@@ -32,7 +32,7 @@
#ifndef _OL_OSIF_TXRX_API_H_
#define _OL_OSIF_TXRX_API_H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
/**
* @brief Call tx completion handler to release the buffers
diff --git a/core/dp/txrx/ol_rx.c b/core/dp/txrx/ol_rx.c
index 263341c..c3f475e 100644
--- a/core/dp/txrx/ol_rx.c
+++ b/core/dp/txrx/ol_rx.c
@@ -25,7 +25,7 @@
* to the Linux Foundation.
*/
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <qdf_util.h> /* qdf_cpu_to_le64 */
#include <qdf_types.h> /* bool */
#include <cds_ieee80211_common.h> /* ieee80211_frame */
@@ -68,7 +68,7 @@
#endif
void ol_rx_data_process(struct ol_txrx_peer_t *peer,
- cdf_nbuf_t rx_buf_list);
+ qdf_nbuf_t rx_buf_list);
#ifdef HTT_RX_RESTORE
@@ -84,16 +84,16 @@
static DECLARE_WORK(ol_rx_restore_work, ol_rx_restore_handler);
-void ol_rx_trigger_restore(htt_pdev_handle htt_pdev, cdf_nbuf_t head_msdu,
- cdf_nbuf_t tail_msdu)
+void ol_rx_trigger_restore(htt_pdev_handle htt_pdev, qdf_nbuf_t head_msdu,
+ qdf_nbuf_t tail_msdu)
{
- cdf_nbuf_t next;
+ qdf_nbuf_t next;
while (head_msdu) {
- next = cdf_nbuf_next(head_msdu);
+ next = qdf_nbuf_next(head_msdu);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
"freeing %p\n", head_msdu);
- cdf_nbuf_free(head_msdu);
+ qdf_nbuf_free(head_msdu);
head_msdu = next;
}
@@ -106,7 +106,7 @@
#endif
static void ol_rx_process_inv_peer(ol_txrx_pdev_handle pdev,
- void *rx_mpdu_desc, cdf_nbuf_t msdu)
+ void *rx_mpdu_desc, qdf_nbuf_t msdu)
{
uint8_t a1[IEEE80211_ADDR_LEN];
htt_pdev_handle htt_pdev = pdev->htt_pdev;
@@ -166,7 +166,7 @@
}
static void
-ol_rx_ind_rssi_update(struct ol_txrx_peer_t *peer, cdf_nbuf_t rx_ind_msg)
+ol_rx_ind_rssi_update(struct ol_txrx_peer_t *peer, qdf_nbuf_t rx_ind_msg)
{
struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
@@ -192,12 +192,12 @@
#endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
void discard_msdus(htt_pdev_handle htt_pdev,
- cdf_nbuf_t head_msdu,
- cdf_nbuf_t tail_msdu)
+ qdf_nbuf_t head_msdu,
+ qdf_nbuf_t tail_msdu)
{
while (1) {
- cdf_nbuf_t next;
- next = cdf_nbuf_next(
+ qdf_nbuf_t next;
+ next = qdf_nbuf_next(
head_msdu);
htt_rx_desc_frame_free
(htt_pdev,
@@ -212,12 +212,12 @@
}
void chain_msdus(htt_pdev_handle htt_pdev,
- cdf_nbuf_t head_msdu,
- cdf_nbuf_t tail_msdu)
+ qdf_nbuf_t head_msdu,
+ qdf_nbuf_t tail_msdu)
{
while (1) {
- cdf_nbuf_t next;
- next = cdf_nbuf_next(head_msdu);
+ qdf_nbuf_t next;
+ next = qdf_nbuf_next(head_msdu);
htt_rx_desc_frame_free(
htt_pdev,
head_msdu);
@@ -232,8 +232,8 @@
void *rx_mpdu_desc,
uint8_t tid,
struct ol_txrx_peer_t *peer,
- cdf_nbuf_t head_msdu,
- cdf_nbuf_t tail_msdu,
+ qdf_nbuf_t head_msdu,
+ qdf_nbuf_t tail_msdu,
int num_mpdu_ranges,
int num_pdus,
bool rx_ind_release
@@ -310,7 +310,7 @@
void
ol_rx_indication_handler(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
uint16_t peer_id, uint8_t tid, int num_mpdu_ranges)
{
int mpdu_range, i;
@@ -383,7 +383,7 @@
if (htt_rx_ind_release(pdev->htt_pdev, rx_ind_msg)) {
/* the ind info of release is saved here and do release at the
- * end. This is for the reason of in HL case, the cdf_nbuf_t
+ * end. This is for the reason of in HL case, the qdf_nbuf_t
* for msg and payload are the same buf. And the buf will be
* changed during processing */
rx_ind_release = true;
@@ -398,7 +398,7 @@
for (mpdu_range = 0; mpdu_range < num_mpdu_ranges; mpdu_range++) {
enum htt_rx_status status;
int i, num_mpdus;
- cdf_nbuf_t head_msdu, tail_msdu, msdu;
+ qdf_nbuf_t head_msdu, tail_msdu, msdu;
void *rx_mpdu_desc;
#ifdef DEBUG_DMA_DONE
@@ -558,8 +558,8 @@
}
while (1) {
/* Free the nbuf */
- cdf_nbuf_t next;
- next = cdf_nbuf_next(msdu);
+ qdf_nbuf_t next;
+ next = qdf_nbuf_next(msdu);
htt_rx_desc_frame_free(htt_pdev, msdu);
if (msdu == tail_msdu)
break;
@@ -649,7 +649,7 @@
#include <cds_ieee80211_common.h>
-static void transcap_nwifi_to_8023(cdf_nbuf_t msdu)
+static void transcap_nwifi_to_8023(qdf_nbuf_t msdu)
{
struct ieee80211_frame *wh;
uint32_t hdrsize;
@@ -661,7 +661,7 @@
uint8_t a3[IEEE80211_ADDR_LEN];
uint8_t fc1;
- wh = (struct ieee80211_frame *)cdf_nbuf_data(msdu);
+ wh = (struct ieee80211_frame *)qdf_nbuf_data(msdu);
qdf_mem_copy(a1, wh->i_addr1, IEEE80211_ADDR_LEN);
qdf_mem_copy(a2, wh->i_addr2, IEEE80211_ADDR_LEN);
qdf_mem_copy(a3, wh->i_addr3, IEEE80211_ADDR_LEN);
@@ -669,17 +669,17 @@
/* Native Wifi header is 80211 non-QoS header */
hdrsize = sizeof(struct ieee80211_frame);
- llchdr = (struct llc *)(((uint8_t *) cdf_nbuf_data(msdu)) + hdrsize);
+ llchdr = (struct llc *)(((uint8_t *) qdf_nbuf_data(msdu)) + hdrsize);
ether_type = llchdr->llc_un.type_snap.ether_type;
/*
* Now move the data pointer to the beginning of the mac header :
* new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
*/
- cdf_nbuf_pull_head(msdu,
+ qdf_nbuf_pull_head(msdu,
(hdrsize + sizeof(struct llc) -
sizeof(struct ether_header)));
- eth_hdr = (struct ether_header *)(cdf_nbuf_data(msdu));
+ eth_hdr = (struct ether_header *)(qdf_nbuf_data(msdu));
switch (fc1) {
case IEEE80211_FC1_DIR_NODS:
qdf_mem_copy(eth_hdr->ether_dhost, a1, IEEE80211_ADDR_LEN);
@@ -705,7 +705,7 @@
uint8_t *peer_mac_addr,
int tid,
uint32_t tsf32,
- enum ol_rx_notify_type notify_type, cdf_nbuf_t rx_frame)
+ enum ol_rx_notify_type notify_type, qdf_nbuf_t rx_frame)
{
/*
* NOTE: This is used in qca_main for AP mode to handle IGMP
@@ -732,14 +732,14 @@
void
ol_rx_inspect(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu, void *rx_desc)
+ unsigned tid, qdf_nbuf_t msdu, void *rx_desc)
{
ol_txrx_pdev_handle pdev = vdev->pdev;
uint8_t *data, *l3_hdr;
uint16_t ethertype;
int offset;
- data = cdf_nbuf_data(msdu);
+ data = qdf_nbuf_data(msdu);
if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
offset = SIZEOF_80211_HDR + LLC_SNAP_HDR_OFFSET_ETHERTYPE;
l3_hdr = data + SIZEOF_80211_HDR + LLC_SNAP_HDR_LEN;
@@ -764,10 +764,10 @@
void
ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t msg, int msdu_cnt)
+ qdf_nbuf_t msg, int msdu_cnt)
{
int vdev_id, peer_id, tid;
- cdf_nbuf_t head_buf, tail_buf, buf;
+ qdf_nbuf_t head_buf, tail_buf, buf;
struct ol_txrx_peer_t *peer;
uint8_t fw_desc;
htt_pdev_handle htt_pdev = pdev->htt_pdev;
@@ -782,8 +782,8 @@
} else {
buf = head_buf;
while (1) {
- cdf_nbuf_t next;
- next = cdf_nbuf_next(buf);
+ qdf_nbuf_t next;
+ next = qdf_nbuf_next(buf);
htt_rx_desc_frame_free(htt_pdev, buf);
if (buf == tail_buf)
break;
@@ -801,7 +801,7 @@
u_int8_t tid,
u_int16_t peer_id,
void *msdu_desc,
- cdf_nbuf_t msdu)
+ qdf_nbuf_t msdu)
{
union htt_rx_pn_t pn = {0};
u_int8_t key_id = 0;
@@ -836,7 +836,7 @@
*/
bool
ol_rx_filter(struct ol_txrx_vdev_t *vdev,
- struct ol_txrx_peer_t *peer, cdf_nbuf_t msdu, void *rx_desc)
+ struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, void *rx_desc)
{
#define FILTER_STATUS_REJECT 1
#define FILTER_STATUS_ACCEPT 0
@@ -867,7 +867,7 @@
offset = ETHERNET_ADDR_LEN * 2;
}
/* get header info from msdu */
- wh = cdf_nbuf_data(msdu);
+ wh = qdf_nbuf_data(msdu);
/* get ether type */
ether_type = (wh[offset] << 8) | wh[offset + 1];
@@ -963,13 +963,13 @@
void
ol_rx_deliver(struct ol_txrx_vdev_t *vdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t msdu_list)
{
ol_txrx_pdev_handle pdev = vdev->pdev;
htt_pdev_handle htt_pdev = pdev->htt_pdev;
- cdf_nbuf_t deliver_list_head = NULL;
- cdf_nbuf_t deliver_list_tail = NULL;
- cdf_nbuf_t msdu;
+ qdf_nbuf_t deliver_list_head = NULL;
+ qdf_nbuf_t deliver_list_tail = NULL;
+ qdf_nbuf_t msdu;
bool filter = false;
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
struct ol_rx_decap_info_t info;
@@ -984,7 +984,7 @@
while (msdu) {
void *rx_desc;
int discard, inspect, dummy_fwd;
- cdf_nbuf_t next = cdf_nbuf_next(msdu);
+ qdf_nbuf_t next = qdf_nbuf_next(msdu);
rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
@@ -1002,7 +1002,7 @@
peer->mac_addr.raw[0], peer->mac_addr.raw[1],
peer->mac_addr.raw[2], peer->mac_addr.raw[3],
peer->mac_addr.raw[4], peer->mac_addr.raw[5],
- cdf_nbuf_len(msdu));
+ qdf_nbuf_len(msdu));
goto DONE;
}
#endif
@@ -1028,13 +1028,13 @@
ol_txrx_frm_dump_tcp_seq |
ol_txrx_frm_dump_contents,
0 /* don't print contents */);
- cdf_nbuf_free(msdu);
+ qdf_nbuf_free(msdu);
/* If discarding packet is last packet of the delivery
list, NULL terminator should be added
for delivery list. */
if (next == NULL && deliver_list_head) {
/* add NULL terminator */
- cdf_nbuf_set_next(deliver_list_tail, NULL);
+ qdf_nbuf_set_next(deliver_list_tail, NULL);
}
} else {
/*
@@ -1117,9 +1117,9 @@
rx_header.tsf32 = peer->last_pkt_tsf;
rx_header.ext_tid = peer->last_pkt_tid;
- cdf_nbuf_push_head(msdu,
+ qdf_nbuf_push_head(msdu,
sizeof(rx_header));
- qdf_mem_copy(cdf_nbuf_data(msdu),
+ qdf_mem_copy(qdf_nbuf_data(msdu),
&rx_header, sizeof(rx_header));
/* Construct the ethernet header with
@@ -1128,9 +1128,9 @@
RX stats header. */
eth_header.ether_type = QDF_SWAP_U16(
ETHERTYPE_OCB_RX);
- cdf_nbuf_push_head(msdu,
+ qdf_nbuf_push_head(msdu,
sizeof(eth_header));
- qdf_mem_copy(cdf_nbuf_data(msdu),
+ qdf_mem_copy(qdf_nbuf_data(msdu),
&eth_header,
sizeof(eth_header));
}
@@ -1150,7 +1150,7 @@
#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
if (pdev->host_80211_enable)
- for (msdu = deliver_list_head; msdu; msdu = cdf_nbuf_next(msdu))
+ for (msdu = deliver_list_head; msdu; msdu = qdf_nbuf_next(msdu))
transcap_nwifi_to_8023(msdu);
#endif
@@ -1164,15 +1164,15 @@
void
ol_rx_discard(struct ol_txrx_vdev_t *vdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t msdu_list)
{
ol_txrx_pdev_handle pdev = vdev->pdev;
htt_pdev_handle htt_pdev = pdev->htt_pdev;
while (msdu_list) {
- cdf_nbuf_t msdu = msdu_list;
+ qdf_nbuf_t msdu = msdu_list;
- msdu_list = cdf_nbuf_next(msdu_list);
+ msdu_list = qdf_nbuf_next(msdu_list);
TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
"discard rx %p from partly-deleted peer %p "
"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
@@ -1214,12 +1214,12 @@
/*
* Free frames including both rx descriptors and buffers
*/
-void ol_rx_frames_free(htt_pdev_handle htt_pdev, cdf_nbuf_t frames)
+void ol_rx_frames_free(htt_pdev_handle htt_pdev, qdf_nbuf_t frames)
{
- cdf_nbuf_t next, frag = frames;
+ qdf_nbuf_t next, frag = frames;
while (frag) {
- next = cdf_nbuf_next(frag);
+ next = qdf_nbuf_next(frag);
htt_rx_desc_frame_free(htt_pdev, frag);
frag = next;
}
@@ -1227,7 +1227,7 @@
void
ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t rx_ind_msg,
+ qdf_nbuf_t rx_ind_msg,
uint16_t peer_id,
uint8_t tid, uint8_t is_offload)
{
@@ -1235,7 +1235,7 @@
struct ol_txrx_peer_t *peer = NULL;
htt_pdev_handle htt_pdev = NULL;
int status;
- cdf_nbuf_t head_msdu, tail_msdu = NULL;
+ qdf_nbuf_t head_msdu, tail_msdu = NULL;
if (pdev) {
peer = ol_txrx_peer_find_by_id(pdev, peer_id);
@@ -1271,7 +1271,7 @@
/* Send the chain of MSDUs to the OS */
/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
- cdf_nbuf_set_next(tail_msdu, NULL);
+ qdf_nbuf_set_next(tail_msdu, NULL);
/* Pktlog */
#ifdef WDI_EVENT_ENABLE
@@ -1287,8 +1287,8 @@
"%s: Couldn't find peer from ID 0x%x\n",
__func__, peer_id);
while (head_msdu) {
- cdf_nbuf_t msdu = head_msdu;
- head_msdu = cdf_nbuf_next(head_msdu);
+ qdf_nbuf_t msdu = head_msdu;
+ head_msdu = qdf_nbuf_next(head_msdu);
htt_rx_desc_frame_free(htt_pdev, msdu);
}
return;
@@ -1301,9 +1301,9 @@
void
ol_rx_in_order_deliver(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu_list)
+ unsigned tid, qdf_nbuf_t msdu_list)
{
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
msdu = msdu_list;
/*
@@ -1314,7 +1314,7 @@
*/
while (msdu) {
- cdf_nbuf_t next = cdf_nbuf_next(msdu);
+ qdf_nbuf_t next = qdf_nbuf_next(msdu);
OL_RX_PEER_STATS_UPDATE(peer, msdu);
OL_RX_ERR_STATISTICS_1(vdev->pdev, vdev, peer, rx_desc,
@@ -1338,7 +1338,7 @@
uint32_t *msg_word)
{
int vdev_id, peer_id, tid;
- cdf_nbuf_t head_buf, tail_buf, buf;
+ qdf_nbuf_t head_buf, tail_buf, buf;
struct ol_txrx_peer_t *peer;
uint8_t fw_desc;
int msdu_iter = 0;
@@ -1355,8 +1355,8 @@
} else {
buf = head_buf;
while (1) {
- cdf_nbuf_t next;
- next = cdf_nbuf_next(buf);
+ qdf_nbuf_t next;
+ next = qdf_nbuf_next(buf);
htt_rx_desc_frame_free(htt_pdev, buf);
if (buf == tail_buf)
break;
@@ -1376,7 +1376,7 @@
* @param msdu - network buffer handle
* @param pdev - handle to htt dev.
*/
-void ol_ath_add_vow_extstats(htt_pdev_handle pdev, cdf_nbuf_t msdu)
+void ol_ath_add_vow_extstats(htt_pdev_handle pdev, qdf_nbuf_t msdu)
{
/* FIX THIS:
* txrx should not be directly using data types (scn)
@@ -1393,7 +1393,7 @@
int offset;
struct vow_extstats vowstats;
- data = cdf_nbuf_data(msdu);
+ data = qdf_nbuf_data(msdu);
offset = ETHERNET_ADDR_LEN * 2;
l3_hdr = data + ETHERNET_HDR_LEN;
diff --git a/core/dp/txrx/ol_rx.h b/core/dp/txrx/ol_rx.h
index 33d749f..ce1bbb0 100644
--- a/core/dp/txrx/ol_rx.h
+++ b/core/dp/txrx/ol_rx.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -28,18 +28,18 @@
#ifndef _OL_RX__H_
#define _OL_RX__H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
void
ol_rx_deliver(struct ol_txrx_vdev_t *vdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t head_msdu);
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t head_msdu);
void
ol_rx_discard(struct ol_txrx_vdev_t *vdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t head_msdu);
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t head_msdu);
-void ol_rx_frames_free(htt_pdev_handle htt_pdev, cdf_nbuf_t frames);
+void ol_rx_frames_free(htt_pdev_handle htt_pdev, qdf_nbuf_t frames);
void ol_rx_peer_init(struct ol_txrx_pdev_t *pdev, struct ol_txrx_peer_t *peer);
@@ -49,7 +49,7 @@
void
ol_rx_in_order_deliver(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t head_msdu);
+ unsigned tid, qdf_nbuf_t head_msdu);
void
ol_rx_offload_paddr_deliver_ind_handler(htt_pdev_handle htt_pdev,
@@ -62,6 +62,6 @@
u_int8_t tid,
u_int16_t peer_id,
void *msdu_desc,
- cdf_nbuf_t msdu);
+ qdf_nbuf_t msdu);
#endif /* _OL_RX__H_ */
diff --git a/core/dp/txrx/ol_rx_defrag.c b/core/dp/txrx/ol_rx_defrag.c
index 58f5b90..f4c0768 100644
--- a/core/dp/txrx/ol_rx_defrag.c
+++ b/core/dp/txrx/ol_rx_defrag.c
@@ -61,7 +61,7 @@
#include <ol_txrx_internal.h>
#include <ol_ctrl_txrx_api.h>
#include <ol_txrx_peer_find.h>
-#include <cdf_nbuf.h>
+#include <qdf_nbuf.h>
#include <ieee80211.h>
#include <qdf_util.h>
#include <athdefs.h>
@@ -107,18 +107,18 @@
inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
htt_pdev_handle htt_pdev,
- cdf_nbuf_t frag)
+ qdf_nbuf_t frag)
{
return
- (struct ieee80211_frame *) cdf_nbuf_data(frag);
+ (struct ieee80211_frame *) qdf_nbuf_data(frag);
}
#define ol_rx_frag_pull_hdr(pdev, frag, hdrsize) \
- cdf_nbuf_pull_head(frag, hdrsize);
+ qdf_nbuf_pull_head(frag, hdrsize);
#define OL_RX_FRAG_CLONE(frag) NULL /* no-op */
static inline void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t msdu,
+ qdf_nbuf_t msdu,
void **rx_desc_old_position,
void **ind_old_position, int *rx_desc_len)
{
@@ -132,14 +132,14 @@
*/
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t rx_frag_ind_msg,
+ qdf_nbuf_t rx_frag_ind_msg,
uint16_t peer_id, uint8_t tid)
{
uint16_t seq_num;
int seq_num_start, seq_num_end;
struct ol_txrx_peer_t *peer;
htt_pdev_handle htt_pdev;
- cdf_nbuf_t head_msdu, tail_msdu;
+ qdf_nbuf_t head_msdu, tail_msdu;
void *rx_mpdu_desc;
htt_pdev = pdev->htt_pdev;
@@ -213,7 +213,7 @@
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, uint16_t seq_num, cdf_nbuf_t frag)
+ unsigned tid, uint16_t seq_num, qdf_nbuf_t frag)
{
struct ieee80211_frame *fmac_hdr, *mac_hdr;
uint8_t fragno, more_frag, all_frag_present = 0;
@@ -236,7 +236,7 @@
if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
rx_reorder_array_elem->head = frag;
rx_reorder_array_elem->tail = frag;
- cdf_nbuf_set_next(frag, NULL);
+ qdf_nbuf_set_next(frag, NULL);
ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
rx_reorder_array_elem->head = NULL;
rx_reorder_array_elem->tail = NULL;
@@ -292,15 +292,15 @@
*/
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
- cdf_nbuf_t *head_addr,
- cdf_nbuf_t *tail_addr,
- cdf_nbuf_t frag, uint8_t *all_frag_present)
+ qdf_nbuf_t *head_addr,
+ qdf_nbuf_t *tail_addr,
+ qdf_nbuf_t frag, uint8_t *all_frag_present)
{
- cdf_nbuf_t next, prev = NULL, cur = *head_addr;
+ qdf_nbuf_t next, prev = NULL, cur = *head_addr;
struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
uint8_t fragno, cur_fragno, lfragno, next_fragno;
uint8_t last_morefrag = 1, count = 0;
- cdf_nbuf_t frag_clone;
+ qdf_nbuf_t frag_clone;
qdf_assert(frag);
frag_clone = OL_RX_FRAG_CLONE(frag);
@@ -314,7 +314,7 @@
if (!(*head_addr)) {
*head_addr = frag;
*tail_addr = frag;
- cdf_nbuf_set_next(*tail_addr, NULL);
+ qdf_nbuf_set_next(*tail_addr, NULL);
return;
}
/* For efficiency, compare with tail first */
@@ -323,9 +323,9 @@
lfragno = qdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
IEEE80211_SEQ_FRAG_MASK;
if (fragno > lfragno) {
- cdf_nbuf_set_next(*tail_addr, frag);
+ qdf_nbuf_set_next(*tail_addr, frag);
*tail_addr = frag;
- cdf_nbuf_set_next(*tail_addr, NULL);
+ qdf_nbuf_set_next(*tail_addr, NULL);
} else {
do {
cmac_hdr = (struct ieee80211_frame *)
@@ -334,7 +334,7 @@
qdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq) &
IEEE80211_SEQ_FRAG_MASK;
prev = cur;
- cur = cdf_nbuf_next(cur);
+ cur = qdf_nbuf_next(cur);
} while (fragno > cur_fragno);
if (fragno == cur_fragno) {
@@ -342,11 +342,11 @@
*all_frag_present = 0;
return;
} else {
- cdf_nbuf_set_next(prev, frag);
- cdf_nbuf_set_next(frag, cur);
+ qdf_nbuf_set_next(prev, frag);
+ qdf_nbuf_set_next(frag, cur);
}
}
- next = cdf_nbuf_next(*head_addr);
+ next = qdf_nbuf_next(*head_addr);
lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
*tail_addr);
last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
@@ -362,7 +362,7 @@
if (next_fragno != count)
break;
- next = cdf_nbuf_next(next);
+ next = qdf_nbuf_next(next);
} while (next);
if (!next) {
@@ -448,10 +448,10 @@
*/
void
ol_rx_defrag(ol_txrx_pdev_handle pdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t frag_list)
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t frag_list)
{
struct ol_txrx_vdev_t *vdev = NULL;
- cdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
+ qdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
uint8_t index, tkip_demic = 0;
uint16_t hdr_space;
void *rx_desc;
@@ -471,12 +471,12 @@
}
while (cur) {
- tmp_next = cdf_nbuf_next(cur);
- cdf_nbuf_set_next(cur, NULL);
+ tmp_next = qdf_nbuf_next(cur);
+ qdf_nbuf_set_next(cur, NULL);
if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) {
/* PN check failed,discard frags */
if (prev) {
- cdf_nbuf_set_next(prev, NULL);
+ qdf_nbuf_set_next(prev, NULL);
ol_rx_frames_free(htt_pdev, frag_list);
}
ol_rx_frames_free(htt_pdev, tmp_next);
@@ -485,9 +485,9 @@
return;
}
/* remove FCS from each fragment */
- cdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
+ qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
prev = cur;
- cdf_nbuf_set_next(cur, tmp_next);
+ qdf_nbuf_set_next(cur, tmp_next);
cur = tmp_next;
}
cur = frag_list;
@@ -504,7 +504,7 @@
/* fall-through to rest of tkip ops */
case htt_sec_type_tkip_nomic:
while (cur) {
- tmp_next = cdf_nbuf_next(cur);
+ tmp_next = qdf_nbuf_next(cur);
if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) {
/* TKIP decap failed, discard frags */
ol_rx_frames_free(htt_pdev, frag_list);
@@ -518,7 +518,7 @@
case htt_sec_type_aes_ccmp:
while (cur) {
- tmp_next = cdf_nbuf_next(cur);
+ tmp_next = qdf_nbuf_next(cur);
if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) {
/* CCMP demic failed, discard frags */
ol_rx_frames_free(htt_pdev, frag_list);
@@ -541,7 +541,7 @@
case htt_sec_type_wep104:
case htt_sec_type_wep128:
while (cur) {
- tmp_next = cdf_nbuf_next(cur);
+ tmp_next = qdf_nbuf_next(cur);
if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) {
/* wep decap failed, discard frags */
ol_rx_frames_free(htt_pdev, frag_list);
@@ -589,7 +589,7 @@
*/
int
ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t msdu, uint16_t hdrlen)
+ qdf_nbuf_t msdu, uint16_t hdrlen)
{
uint8_t *ivp, *origHdr;
@@ -602,15 +602,15 @@
&rx_desc_old_position,
&ind_old_position, &rx_desc_len);
/* Header should have extended IV */
- origHdr = (uint8_t *) (cdf_nbuf_data(msdu) + rx_desc_len);
+ origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
ivp = origHdr + hdrlen;
if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
return OL_RX_DEFRAG_ERR;
qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
- cdf_nbuf_pull_head(msdu, f_tkip.ic_header);
- cdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
+ qdf_nbuf_pull_head(msdu, f_tkip.ic_header);
+ qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
return OL_RX_DEFRAG_OK;
}
@@ -618,7 +618,7 @@
* Handling WEP processing for defragmentation
*/
int
-ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t hdrlen)
+ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t hdrlen)
{
uint8_t *origHdr;
void *rx_desc_old_position = NULL;
@@ -629,10 +629,10 @@
msdu,
&rx_desc_old_position,
&ind_old_position, &rx_desc_len);
- origHdr = (uint8_t *) (cdf_nbuf_data(msdu) + rx_desc_len);
+ origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
qdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
- cdf_nbuf_pull_head(msdu, f_wep.ic_header);
- cdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
+ qdf_nbuf_pull_head(msdu, f_wep.ic_header);
+ qdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
return OL_RX_DEFRAG_OK;
}
@@ -641,7 +641,7 @@
*/
int
ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key,
- cdf_nbuf_t msdu, uint16_t hdrlen)
+ qdf_nbuf_t msdu, uint16_t hdrlen)
{
int status;
uint32_t pktlen;
@@ -668,7 +668,7 @@
if (!qdf_mem_cmp(mic, mic0, f_tkip.ic_miclen))
return OL_RX_DEFRAG_ERR;
- cdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
+ qdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
return OL_RX_DEFRAG_OK;
}
@@ -677,7 +677,7 @@
*/
int
ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t nbuf, uint16_t hdrlen)
+ qdf_nbuf_t nbuf, uint16_t hdrlen)
{
uint8_t *ivp, *origHdr;
void *rx_desc_old_position = NULL;
@@ -689,13 +689,13 @@
&rx_desc_old_position,
&ind_old_position, &rx_desc_len);
- origHdr = (uint8_t *) (cdf_nbuf_data(nbuf) + rx_desc_len);
+ origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
ivp = origHdr + hdrlen;
if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
return OL_RX_DEFRAG_ERR;
qdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
- cdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);
+ qdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);
return OL_RX_DEFRAG_OK;
}
@@ -705,7 +705,7 @@
*/
int
ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t wbuf, uint16_t hdrlen)
+ qdf_nbuf_t wbuf, uint16_t hdrlen)
{
uint8_t *ivp, *origHdr;
void *rx_desc_old_position = NULL;
@@ -717,13 +717,13 @@
&rx_desc_old_position,
&ind_old_position, &rx_desc_len);
- origHdr = (uint8_t *) (cdf_nbuf_data(wbuf) + rx_desc_len);
+ origHdr = (uint8_t *) (qdf_nbuf_data(wbuf) + rx_desc_len);
ivp = origHdr + hdrlen;
if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
return OL_RX_DEFRAG_ERR;
- cdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);
+ qdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);
return OL_RX_DEFRAG_OK;
}
@@ -779,7 +779,7 @@
int
ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
const uint8_t *key,
- cdf_nbuf_t wbuf,
+ qdf_nbuf_t wbuf,
uint16_t off, uint16_t data_len, uint8_t mic[])
{
uint8_t hdr[16] = { 0, };
@@ -795,7 +795,7 @@
&rx_desc_old_position,
&ind_old_position, &rx_desc_len);
- ol_rx_defrag_michdr((struct ieee80211_frame *)(cdf_nbuf_data(wbuf) +
+ ol_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) +
rx_desc_len), hdr);
l = get_le32(key);
r = get_le32(key + 4);
@@ -811,7 +811,7 @@
michael_block(l, r);
/* first buffer has special handling */
- data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len + off;
+ data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off;
space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
for (;; ) {
if (space > data_len)
@@ -828,7 +828,7 @@
if (data_len < sizeof(uint32_t))
break;
- wbuf = cdf_nbuf_next(wbuf);
+ wbuf = qdf_nbuf_next(wbuf);
if (wbuf == NULL)
return OL_RX_DEFRAG_ERR;
@@ -840,7 +840,7 @@
* Block straddles buffers, split references.
*/
data_next =
- (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
+ (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
sizeof(uint32_t) - space) {
return OL_RX_DEFRAG_ERR;
@@ -874,7 +874,7 @@
/*
* Setup for next buffer.
*/
- data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
+ data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
space = ol_rx_defrag_len(wbuf) - rx_desc_len;
}
}
@@ -923,26 +923,26 @@
/*
* Recombine and decap fragments
*/
-cdf_nbuf_t
+qdf_nbuf_t
ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
- cdf_nbuf_t frag_list, uint16_t hdrsize)
+ qdf_nbuf_t frag_list, uint16_t hdrsize)
{
- cdf_nbuf_t tmp;
- cdf_nbuf_t msdu = frag_list;
- cdf_nbuf_t rx_nbuf = frag_list;
+ qdf_nbuf_t tmp;
+ qdf_nbuf_t msdu = frag_list;
+ qdf_nbuf_t rx_nbuf = frag_list;
struct ieee80211_frame *wh;
- msdu = cdf_nbuf_next(msdu);
- cdf_nbuf_set_next(rx_nbuf, NULL);
+ msdu = qdf_nbuf_next(msdu);
+ qdf_nbuf_set_next(rx_nbuf, NULL);
while (msdu) {
htt_rx_msdu_desc_free(htt_pdev, msdu);
- tmp = cdf_nbuf_next(msdu);
- cdf_nbuf_set_next(msdu, NULL);
+ tmp = qdf_nbuf_next(msdu);
+ qdf_nbuf_set_next(msdu, NULL);
ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
ol_rx_frames_free(htt_pdev, tmp);
htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
- cdf_nbuf_free(msdu);
+ qdf_nbuf_free(msdu);
/* msdu rx desc already freed above */
return NULL;
}
@@ -956,7 +956,7 @@
return rx_nbuf;
}
-void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu)
+void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu)
{
struct ieee80211_frame wh;
uint32_t hdrsize;
@@ -972,10 +972,10 @@
&rx_desc_old_position,
&ind_old_position, &rx_desc_len);
- wh_ptr = (struct ieee80211_frame *)(cdf_nbuf_data(msdu) + rx_desc_len);
+ wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) + rx_desc_len);
qdf_mem_copy(&wh, wh_ptr, sizeof(wh));
hdrsize = sizeof(struct ieee80211_frame);
- qdf_mem_copy(&llchdr, ((uint8_t *) (cdf_nbuf_data(msdu) +
+ qdf_mem_copy(&llchdr, ((uint8_t *) (qdf_nbuf_data(msdu) +
rx_desc_len)) + hdrsize,
sizeof(struct llc_snap_hdr_t));
@@ -983,10 +983,10 @@
* Now move the data pointer to the beginning of the mac header :
* new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
*/
- cdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
+ qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
sizeof(struct llc_snap_hdr_t) -
sizeof(struct ethernet_hdr_t)));
- eth_hdr = (struct ethernet_hdr_t *)(cdf_nbuf_data(msdu));
+ eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu));
switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
case IEEE80211_FC1_DIR_NODS:
qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
@@ -1016,7 +1016,7 @@
*/
void
ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t nbuf, uint16_t hdrlen)
+ qdf_nbuf_t nbuf, uint16_t hdrlen)
{
struct ieee80211_frame *wh;
uint16_t qoslen;
@@ -1029,7 +1029,7 @@
&rx_desc_old_position,
&ind_old_position, &rx_desc_len);
- wh = (struct ieee80211_frame *)(cdf_nbuf_data(nbuf) + rx_desc_len);
+ wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + rx_desc_len);
if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
qoslen = sizeof(struct ieee80211_qoscntl);
/* Qos frame with Order bit set indicates a HTC frame */
@@ -1039,19 +1039,19 @@
/* remove QoS filed from header */
hdrlen -= qoslen;
qdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen);
- wh = (struct ieee80211_frame *)cdf_nbuf_pull_head(nbuf,
+ wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf,
rx_desc_len +
qoslen);
/* clear QoS bit */
/*
- * KW# 6154 'cdf_nbuf_pull_head' in turn calls
- * __cdf_nbuf_pull_head,
+ * KW# 6154 'qdf_nbuf_pull_head' in turn calls
+ * __qdf_nbuf_pull_head,
* which returns NULL if there is not sufficient data to pull.
- * It's guaranteed that cdf_nbuf_pull_head will succeed rather
+ * It's guaranteed that qdf_nbuf_pull_head will succeed rather
* than returning NULL, since the entire rx frame is already
* present in the rx buffer.
* However, to make it obvious to static analyzers that this
- * code is safe, add an explicit check that cdf_nbuf_pull_head
+ * code is safe, add an explicit check that qdf_nbuf_pull_head
* returns a non-NULL value.
* Since this part of the code is not performance-critical,
* adding this explicit check is okay.
diff --git a/core/dp/txrx/ol_rx_defrag.h b/core/dp/txrx/ol_rx_defrag.h
index 6ed90da..595d83a 100644
--- a/core/dp/txrx/ol_rx_defrag.h
+++ b/core/dp/txrx/ol_rx_defrag.h
@@ -28,7 +28,7 @@
#ifndef _OL_RX_DEFRAG_H_
#define _OL_RX_DEFRAG_H_
-#include <cdf_nbuf.h>
+#include <qdf_nbuf.h>
#include <cds_ieee80211_common.h>
#include <qdf_util.h>
#include <qdf_types.h>
@@ -54,16 +54,16 @@
};
#define ol_rx_defrag_copydata(buf, offset, len, _to) \
- cdf_nbuf_copy_bits(buf, offset, len, _to)
+ qdf_nbuf_copy_bits(buf, offset, len, _to)
#define ol_rx_defrag_len(buf) \
- cdf_nbuf_len(buf)
+ qdf_nbuf_len(buf)
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
- cdf_nbuf_t *head_addr,
- cdf_nbuf_t *tail_addr,
- cdf_nbuf_t frag, uint8_t *all_frag_present);
+ qdf_nbuf_t *head_addr,
+ qdf_nbuf_t *tail_addr,
+ qdf_nbuf_t frag, uint8_t *all_frag_present);
void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned tid);
@@ -73,33 +73,33 @@
void
ol_rx_defrag(ol_txrx_pdev_handle pdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t frag_list);
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t frag_list);
int
ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t msdu, uint16_t hdrlen);
+ qdf_nbuf_t msdu, uint16_t hdrlen);
int
ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t nbuf, uint16_t hdrlen);
+ qdf_nbuf_t nbuf, uint16_t hdrlen);
-void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu);
+void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu);
void
ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t nbuf, uint16_t hdrlen);
+ qdf_nbuf_t nbuf, uint16_t hdrlen);
int
ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev,
- const uint8_t *key, cdf_nbuf_t msdu, uint16_t hdrlen);
+ const uint8_t *key, qdf_nbuf_t msdu, uint16_t hdrlen);
int
ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t nbuf, uint16_t hdrlen);
+ qdf_nbuf_t nbuf, uint16_t hdrlen);
int
ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
- cdf_nbuf_t wbuf, uint16_t hdrlen);
+ qdf_nbuf_t wbuf, uint16_t hdrlen);
uint16_t ol_rx_frag_hdrsize(const void *data);
@@ -108,16 +108,16 @@
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, uint16_t seq_num, cdf_nbuf_t frag);
+ unsigned tid, uint16_t seq_num, qdf_nbuf_t frag);
-cdf_nbuf_t
+qdf_nbuf_t
ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
- cdf_nbuf_t frag_list, uint16_t hdrsize);
+ qdf_nbuf_t frag_list, uint16_t hdrsize);
int
ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
const uint8_t *key,
- cdf_nbuf_t wbuf,
+ qdf_nbuf_t wbuf,
uint16_t off, uint16_t data_len, uint8_t mic[]);
void
@@ -167,16 +167,16 @@
p[3] = (v >> 24) & 0xff;
}
-static inline uint8_t ol_rx_defrag_concat(cdf_nbuf_t dst, cdf_nbuf_t src)
+static inline uint8_t ol_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
/*
- * Inside cdf_nbuf_cat, if it is necessary to reallocate dst
+ * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
* to provide space for src, the headroom portion is copied from
* the original dst buffer to the larger new dst buffer.
* (This is needed, because the headroom of the dst buffer
* contains the rx desc.)
*/
- if (cdf_nbuf_cat(dst, src))
+ if (qdf_nbuf_cat(dst, src))
return OL_RX_DEFRAG_ERR;
return OL_RX_DEFRAG_OK;
diff --git a/core/dp/txrx/ol_rx_fwd.c b/core/dp/txrx/ol_rx_fwd.c
index f85df9d..7653928 100644
--- a/core/dp/txrx/ol_rx_fwd.c
+++ b/core/dp/txrx/ol_rx_fwd.c
@@ -26,7 +26,7 @@
*/
/* standard header files */
-#include <cdf_nbuf.h> /* cdf_nbuf_map */
+#include <qdf_nbuf.h> /* qdf_nbuf_map */
#include <qdf_mem.h> /* qdf_mem_cmp */
/* external header files */
@@ -48,7 +48,7 @@
* Check that this Packet is suitable for forwarding. If yes, then
* prepare the new 802.11 header.
*/
-static inline void ol_ap_fwd_check(struct ol_txrx_vdev_t *vdev, cdf_nbuf_t msdu)
+static inline void ol_ap_fwd_check(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu)
{
struct ieee80211_frame *mac_header;
unsigned char tmp_addr[IEEE80211_ADDR_LEN];
@@ -57,7 +57,7 @@
unsigned char fromds;
unsigned char tods;
- mac_header = (struct ieee80211_frame *)(cdf_nbuf_data(msdu));
+ mac_header = (struct ieee80211_frame *)(qdf_nbuf_data(msdu));
TXRX_ASSERT1(mac_header);
type = mac_header->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
@@ -107,7 +107,7 @@
}
}
-static inline void ol_rx_fwd_to_tx(struct ol_txrx_vdev_t *vdev, cdf_nbuf_t msdu)
+static inline void ol_rx_fwd_to_tx(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu)
{
struct ol_txrx_pdev_t *pdev = vdev->pdev;
@@ -118,8 +118,8 @@
* Map the netbuf, so it's accessible to the DMA that
* sends it to the target.
*/
- cdf_nbuf_map_single(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);
- cdf_nbuf_set_next(msdu, NULL); /* add NULL terminator */
+ qdf_nbuf_map_single(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_set_next(msdu, NULL); /* add NULL terminator */
msdu = OL_TX_LL(vdev, msdu);
@@ -129,19 +129,19 @@
* We could store the frame and try again later,
* but the simplest solution is to discard the frames.
*/
- cdf_nbuf_unmap_single(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);
- cdf_nbuf_tx_free(msdu, NBUF_PKT_ERROR);
+ qdf_nbuf_unmap_single(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_tx_free(msdu, QDF_NBUF_PKT_ERROR);
}
}
void
ol_rx_fwd_check(struct ol_txrx_vdev_t *vdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t msdu_list)
{
struct ol_txrx_pdev_t *pdev = vdev->pdev;
- cdf_nbuf_t deliver_list_head = NULL;
- cdf_nbuf_t deliver_list_tail = NULL;
- cdf_nbuf_t msdu;
+ qdf_nbuf_t deliver_list_head = NULL;
+ qdf_nbuf_t deliver_list_tail = NULL;
+ qdf_nbuf_t msdu;
msdu = msdu_list;
while (msdu) {
@@ -151,7 +151,7 @@
* Remember the next list elem, because our processing
* may cause the MSDU to get linked into a different list.
*/
- msdu_list = cdf_nbuf_next(msdu);
+ msdu_list = qdf_nbuf_next(msdu);
rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
@@ -182,9 +182,9 @@
* Fw will take care of filling proper tid.
*/
if (tid != HTT_NON_QOS_TID) {
- cdf_nbuf_set_tid(msdu, tid);
+ qdf_nbuf_set_tid(msdu, tid);
} else {
- cdf_nbuf_set_tid(msdu,
+ qdf_nbuf_set_tid(msdu,
QDF_NBUF_TX_EXT_TID_INVALID);
}
/*
@@ -195,14 +195,14 @@
*/
if (htt_rx_msdu_discard(pdev->htt_pdev, rx_desc)) {
htt_rx_msdu_desc_free(pdev->htt_pdev, msdu);
- cdf_net_buf_debug_release_skb(msdu);
+ qdf_net_buf_debug_release_skb(msdu);
ol_rx_fwd_to_tx(tx_vdev, msdu);
msdu = NULL; /* already handled this MSDU */
TXRX_STATS_ADD(pdev,
pub.rx.intra_bss_fwd.packets_fwd, 1);
} else {
- cdf_nbuf_t copy;
- copy = cdf_nbuf_copy(msdu);
+ qdf_nbuf_t copy;
+ copy = qdf_nbuf_copy(msdu);
if (copy)
ol_rx_fwd_to_tx(tx_vdev, copy);
TXRX_STATS_ADD(pdev,
@@ -221,7 +221,7 @@
}
if (deliver_list_head) {
/* add NULL terminator */
- cdf_nbuf_set_next(deliver_list_tail, NULL);
+ qdf_nbuf_set_next(deliver_list_tail, NULL);
if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
ol_rx_in_order_deliver(vdev, peer, tid,
deliver_list_head);
diff --git a/core/dp/txrx/ol_rx_fwd.h b/core/dp/txrx/ol_rx_fwd.h
index fe570c5..79d1e2d 100644
--- a/core/dp/txrx/ol_rx_fwd.h
+++ b/core/dp/txrx/ol_rx_fwd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -28,19 +28,19 @@
#ifndef _OL_RX_FWD_H_
#define _OL_RX_FWD_H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <ol_txrx_api.h> /* ol_txrx_peer_t, etc. */
-cdf_nbuf_t
+qdf_nbuf_t
ol_rx_fwd_mcast_check_sta(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- cdf_nbuf_t msdu, void *rx_desc, int is_wlan_mcast);
+ qdf_nbuf_t msdu, void *rx_desc, int is_wlan_mcast);
-cdf_nbuf_t
+qdf_nbuf_t
ol_rx_fwd_mcast_check_ap(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- cdf_nbuf_t msdu, void *rx_desc, int is_wlan_mcast);
+ qdf_nbuf_t msdu, void *rx_desc, int is_wlan_mcast);
/**
* @brief Check if rx frames should be transmitted over WLAN.
@@ -70,6 +70,6 @@
void
ol_rx_fwd_check(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu_list);
+ unsigned tid, qdf_nbuf_t msdu_list);
#endif /* _OL_RX_FWD_H_ */
diff --git a/core/dp/txrx/ol_rx_pn.c b/core/dp/txrx/ol_rx_pn.c
index 4da66ef..fa4903d 100644
--- a/core/dp/txrx/ol_rx_pn.c
+++ b/core/dp/txrx/ol_rx_pn.c
@@ -25,7 +25,7 @@
* to the Linux Foundation.
*/
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_htt_rx_api.h> /* htt_rx_pn_t, etc. */
#include <ol_ctrl_txrx_api.h> /* ol_rx_err */
@@ -41,7 +41,7 @@
if (!head) { \
head = mpdu; \
} else { \
- cdf_nbuf_set_next(tail, mpdu); \
+ qdf_nbuf_set_next(tail, mpdu); \
} \
tail = mpdu_tail; \
} while (0)
@@ -80,16 +80,16 @@
return pn_is_replay;
}
-cdf_nbuf_t
+qdf_nbuf_t
ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu_list)
+ unsigned tid, qdf_nbuf_t msdu_list)
{
struct ol_txrx_pdev_t *pdev = vdev->pdev;
union htt_rx_pn_t *last_pn;
- cdf_nbuf_t out_list_head = NULL;
- cdf_nbuf_t out_list_tail = NULL;
- cdf_nbuf_t mpdu;
+ qdf_nbuf_t out_list_head = NULL;
+ qdf_nbuf_t out_list_tail = NULL;
+ qdf_nbuf_t mpdu;
int index; /* unicast vs. multicast */
int pn_len;
void *rx_desc;
@@ -114,7 +114,7 @@
last_pn = &peer->tids_last_pn[tid];
mpdu = msdu_list;
while (mpdu) {
- cdf_nbuf_t mpdu_tail, next_mpdu;
+ qdf_nbuf_t mpdu_tail, next_mpdu;
union htt_rx_pn_t new_pn;
int pn_is_replay = 0;
@@ -148,7 +148,7 @@
}
if (pn_is_replay) {
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
static uint32_t last_pncheck_print_time /* = 0 */;
int log_level;
uint32_t current_time_ms;
@@ -198,10 +198,10 @@
mpdu, NULL, 0);
/* free all MSDUs within this MPDU */
do {
- cdf_nbuf_t next_msdu;
+ qdf_nbuf_t next_msdu;
OL_RX_ERR_STATISTICS_1(pdev, vdev, peer,
rx_desc, OL_RX_ERR_PN);
- next_msdu = cdf_nbuf_next(msdu);
+ next_msdu = qdf_nbuf_next(msdu);
htt_rx_desc_frame_free(pdev->htt_pdev, msdu);
if (msdu == mpdu_tail)
break;
@@ -228,14 +228,14 @@
}
/* make sure the list is null-terminated */
if (out_list_tail)
- cdf_nbuf_set_next(out_list_tail, NULL);
+ qdf_nbuf_set_next(out_list_tail, NULL);
return out_list_head;
}
void
ol_rx_pn_check(struct ol_txrx_vdev_t *vdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t msdu_list)
{
msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list);
ol_rx_fwd_check(vdev, peer, tid, msdu_list);
@@ -244,7 +244,7 @@
void
ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu_list)
+ unsigned tid, qdf_nbuf_t msdu_list)
{
msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list);
ol_rx_deliver(vdev, peer, tid, msdu_list);
diff --git a/core/dp/txrx/ol_rx_pn.h b/core/dp/txrx/ol_rx_pn.h
index 845dc91..0b132f8 100644
--- a/core/dp/txrx/ol_rx_pn.h
+++ b/core/dp/txrx/ol_rx_pn.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -28,7 +28,7 @@
#ifndef _OL_RX_PN_H_
#define _OL_RX_PN_H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <ol_txrx_api.h> /* ol_txrx_peer_t, etc. */
@@ -60,7 +60,7 @@
*/
void
ol_rx_pn_check(struct ol_txrx_vdev_t *vdev,
- struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list);
+ struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t msdu_list);
/**
* @brief If applicable, check the Packet Number to detect replays.
@@ -82,7 +82,7 @@
void
ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu_list);
+ unsigned tid, qdf_nbuf_t msdu_list);
/**
* @brief If applicable, check the Packet Number to detect replays.
@@ -97,9 +97,9 @@
* (if PN check is applicable, i.e. PN length > 0)
* @return list of netbufs that didn't fail the PN check
*/
-cdf_nbuf_t
+qdf_nbuf_t
ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu_list);
+ unsigned tid, qdf_nbuf_t msdu_list);
#endif /* _OL_RX_PN_H_ */
diff --git a/core/dp/txrx/ol_rx_reorder.c b/core/dp/txrx/ol_rx_reorder.c
index 42fd4fb..f832aa6 100644
--- a/core/dp/txrx/ol_rx_reorder.c
+++ b/core/dp/txrx/ol_rx_reorder.c
@@ -27,7 +27,7 @@
/*=== header file includes ===*/
/* generic utilities */
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h>          /* qdf_nbuf_t, etc. */
#include <qdf_mem.h> /* qdf_mem_malloc */
#include <ieee80211.h> /* IEEE80211_SEQ_MAX */
@@ -86,7 +86,7 @@
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
do { \
if (tail_msdu) { \
- cdf_nbuf_set_next(tail_msdu, \
+ qdf_nbuf_set_next(tail_msdu, \
rx_reorder_array_elem->head); \
} \
} while (0)
@@ -223,14 +223,14 @@
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
struct ol_txrx_peer_t *peer,
unsigned tid,
- unsigned idx, cdf_nbuf_t head_msdu, cdf_nbuf_t tail_msdu)
+ unsigned idx, qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
idx &= peer->tids_rx_reorder[tid].win_sz_mask;
rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
if (rx_reorder_array_elem->head) {
- cdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
+ qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
} else {
rx_reorder_array_elem->head = head_msdu;
OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
@@ -246,8 +246,8 @@
unsigned idx;
unsigned win_sz, win_sz_mask;
struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
- cdf_nbuf_t head_msdu;
- cdf_nbuf_t tail_msdu;
+ qdf_nbuf_t head_msdu;
+ qdf_nbuf_t tail_msdu;
OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
/* may get reset below */
@@ -305,7 +305,7 @@
head_msdu));
peer->tids_last_seq[tid] = seq_num;
/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
- cdf_nbuf_set_next(tail_msdu, NULL);
+ qdf_nbuf_set_next(tail_msdu, NULL);
peer->rx_opt_proc(vdev, peer, tid, head_msdu);
}
/*
@@ -327,8 +327,8 @@
unsigned win_sz;
uint8_t win_sz_mask;
struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
- cdf_nbuf_t head_msdu = NULL;
- cdf_nbuf_t tail_msdu = NULL;
+ qdf_nbuf_t head_msdu = NULL;
+ qdf_nbuf_t tail_msdu = NULL;
pdev = vdev->pdev;
win_sz = peer->tids_rx_reorder[tid].win_sz;
@@ -371,7 +371,7 @@
rx_reorder_array_elem->tail = NULL;
continue;
}
- cdf_nbuf_set_next(tail_msdu,
+ qdf_nbuf_set_next(tail_msdu,
rx_reorder_array_elem->head);
tail_msdu = rx_reorder_array_elem->tail;
rx_reorder_array_elem->head =
@@ -390,13 +390,13 @@
htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
peer->tids_last_seq[tid] = seq_num;
/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
- cdf_nbuf_set_next(tail_msdu, NULL);
+ qdf_nbuf_set_next(tail_msdu, NULL);
if (action == htt_rx_flush_release) {
peer->rx_opt_proc(vdev, peer, tid, head_msdu);
} else {
do {
- cdf_nbuf_t next;
- next = cdf_nbuf_next(head_msdu);
+ qdf_nbuf_t next;
+ next = qdf_nbuf_next(head_msdu);
htt_rx_desc_frame_free(pdev->htt_pdev,
head_msdu);
head_msdu = next;
@@ -596,8 +596,8 @@
struct ol_txrx_peer_t *peer;
struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
unsigned win_sz_mask;
- cdf_nbuf_t head_msdu = NULL;
- cdf_nbuf_t tail_msdu = NULL;
+ qdf_nbuf_t head_msdu = NULL;
+ qdf_nbuf_t tail_msdu = NULL;
htt_pdev_handle htt_pdev = pdev->htt_pdev;
int seq_num, i = 0;
@@ -630,7 +630,7 @@
if (rx_reorder_array_elem->head) {
if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
- cdf_nbuf_t msdu, next_msdu, mpdu_head,
+ qdf_nbuf_t msdu, next_msdu, mpdu_head,
mpdu_tail;
static uint32_t last_pncheck_print_time;
/* Do not need to initialize as C does it */
@@ -691,7 +691,7 @@
/* free all MSDUs within this MPDU */
do {
- next_msdu = cdf_nbuf_next(msdu);
+ next_msdu = qdf_nbuf_next(msdu);
htt_rx_desc_frame_free(htt_pdev, msdu);
if (msdu == mpdu_tail)
break;
@@ -704,7 +704,7 @@
head_msdu = rx_reorder_array_elem->head;
tail_msdu = rx_reorder_array_elem->tail;
} else {
- cdf_nbuf_set_next(
+ qdf_nbuf_set_next(
tail_msdu,
rx_reorder_array_elem->head);
tail_msdu = rx_reorder_array_elem->tail;
@@ -718,7 +718,7 @@
if (head_msdu) {
/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
- cdf_nbuf_set_next(tail_msdu, NULL);
+ qdf_nbuf_set_next(tail_msdu, NULL);
peer->rx_opt_proc(vdev, peer, tid, head_msdu);
}
}
diff --git a/core/dp/txrx/ol_rx_reorder.h b/core/dp/txrx/ol_rx_reorder.h
index 7629c6a..7017154 100644
--- a/core/dp/txrx/ol_rx_reorder.h
+++ b/core/dp/txrx/ol_rx_reorder.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -28,7 +28,7 @@
#ifndef _OL_RX_REORDER__H_
#define _OL_RX_REORDER__H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <ol_txrx_api.h> /* ol_txrx_peer_t, etc. */
@@ -39,7 +39,7 @@
struct ol_txrx_peer_t *peer,
unsigned tid,
unsigned reorder_array_index,
- cdf_nbuf_t head_msdu, cdf_nbuf_t tail_msdu);
+ qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu);
void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
diff --git a/core/dp/txrx/ol_rx_reorder_timeout.c b/core/dp/txrx/ol_rx_reorder_timeout.c
index 3175dc5..24b0ae5 100644
--- a/core/dp/txrx/ol_rx_reorder_timeout.c
+++ b/core/dp/txrx/ol_rx_reorder_timeout.c
@@ -27,7 +27,7 @@
/*=== header file includes ===*/
/* generic utilities */
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <qdf_timer.h>
#include <qdf_time.h>
diff --git a/core/dp/txrx/ol_tx.c b/core/dp/txrx/ol_tx.c
index 6ac608a..b5dedef 100644
--- a/core/dp/txrx/ol_tx.c
+++ b/core/dp/txrx/ol_tx.c
@@ -26,7 +26,7 @@
*/
/* OS abstraction libraries */
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h> /* qdf_atomic_read, etc. */
#include <qdf_util.h> /* qdf_unlikely */
@@ -56,7 +56,7 @@
#include <htt_internal.h>
#include <htt_types.h> /* htc_endpoint */
-int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
+int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
unsigned int num_msdus, unsigned int transfer_id);
#endif /* WLAN_FEATURE_FASTPATH */
@@ -91,11 +91,11 @@
* Return: 0 - success, >0 - error
*/
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
- cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
+ qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
msdu_info->tso_info.curr_seg = NULL;
- if (cdf_nbuf_is_tso(msdu)) {
- int num_seg = cdf_nbuf_get_tso_num_seg(msdu);
+ if (qdf_nbuf_is_tso(msdu)) {
+ int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
msdu_info->tso_info.tso_seg_list = NULL;
msdu_info->tso_info.num_segs = num_seg;
while (num_seg) {
@@ -121,7 +121,7 @@
return 1;
}
}
- cdf_nbuf_get_tso_info(vdev->pdev->osdev,
+ qdf_nbuf_get_tso_info(vdev->pdev->osdev,
msdu, &(msdu_info->tso_info));
msdu_info->tso_info.curr_seg =
msdu_info->tso_info.tso_seg_list;
@@ -142,13 +142,13 @@
*
* Return: skb/NULL for success
*/
-cdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, cdf_nbuf_t skb,
+qdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, qdf_nbuf_t skb,
uint8_t proto_type)
{
void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
struct ol_txrx_peer_t *peer;
- cdf_nbuf_t ret;
+ qdf_nbuf_t ret;
QDF_STATUS status;
if (qdf_unlikely(!pdev)) {
@@ -181,27 +181,27 @@
return skb;
}
- status = cdf_nbuf_map_single(qdf_ctx, skb, QDF_DMA_TO_DEVICE);
+ status = qdf_nbuf_map_single(qdf_ctx, skb, QDF_DMA_TO_DEVICE);
if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
"%s: nbuf map failed", __func__);
return skb;
}
- cdf_nbuf_trace_set_proto_type(skb, proto_type);
+ qdf_nbuf_trace_set_proto_type(skb, proto_type);
if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
- && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
- && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
- cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
+ && (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
+ && (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
+ qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
/* Terminate the (single-element) list of tx frames */
- cdf_nbuf_set_next(skb, NULL);
+ qdf_nbuf_set_next(skb, NULL);
ret = OL_TX_LL(peer->vdev, skb);
if (ret) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
"%s: Failed to tx", __func__);
- cdf_nbuf_unmap_single(qdf_ctx, ret, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_unmap_single(qdf_ctx, ret, QDF_DMA_TO_DEVICE);
return ret;
}
@@ -216,11 +216,11 @@
*
* Return: skb/ NULL is for success
*/
-cdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
- cdf_nbuf_t skb)
+qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
+ qdf_nbuf_t skb)
{
ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
- cdf_nbuf_t ret;
+ qdf_nbuf_t ret;
if (qdf_unlikely(!pdev)) {
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
@@ -229,12 +229,12 @@
}
if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
- && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
- && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
- cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
+ && (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
+ && (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
+ qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
/* Terminate the (single-element) list of tx frames */
- cdf_nbuf_set_next(skb, NULL);
+ qdf_nbuf_set_next(skb, NULL);
ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
if (ret) {
TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
@@ -248,9 +248,9 @@
#if defined(FEATURE_TSO)
-cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
- cdf_nbuf_t msdu = msdu_list;
+ qdf_nbuf_t msdu = msdu_list;
struct ol_txrx_msdu_info_t msdu_info;
msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
@@ -262,11 +262,11 @@
* within the list.
*/
while (msdu) {
- cdf_nbuf_t next;
+ qdf_nbuf_t next;
struct ol_tx_desc_t *tx_desc;
int segments = 1;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
msdu_info.peer = NULL;
if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
@@ -283,7 +283,7 @@
* ol_tx_send function, so store the next pointer before the
* tx_send call.
*/
- next = cdf_nbuf_next(msdu);
+ next = qdf_nbuf_next(msdu);
/* init the current segment to the 1st segment in the list */
while (segments) {
@@ -301,7 +301,7 @@
* receiving tx completion for all segments of an nbuf
*/
if (segments)
- cdf_nbuf_inc_users(msdu);
+ qdf_nbuf_inc_users(msdu);
ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
@@ -318,7 +318,7 @@
msdu_info.tso_info.curr_seg->next;
}
- cdf_nbuf_reset_num_frags(msdu);
+ qdf_nbuf_reset_num_frags(msdu);
if (msdu_info.tso_info.is_tso) {
TXRX_STATS_TSO_INC_SEG(vdev->pdev);
@@ -336,9 +336,9 @@
}
#else /* TSO */
-cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
- cdf_nbuf_t msdu = msdu_list;
+ qdf_nbuf_t msdu = msdu_list;
struct ol_txrx_msdu_info_t msdu_info;
msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
@@ -351,10 +351,10 @@
* within the list.
*/
while (msdu) {
- cdf_nbuf_t next;
+ qdf_nbuf_t next;
struct ol_tx_desc_t *tx_desc;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
msdu_info.peer = NULL;
ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
@@ -368,7 +368,7 @@
* ol_tx_send function, so store the next pointer before the
* tx_send call.
*/
- next = cdf_nbuf_next(msdu);
+ next = qdf_nbuf_next(msdu);
ol_tx_send(vdev->pdev, tx_desc, msdu);
msdu = next;
}
@@ -394,7 +394,7 @@
*/
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
- ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu,
+ ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
uint32_t pkt_download_len, uint32_t ep_id,
struct ol_txrx_msdu_info_t *msdu_info)
{
@@ -436,10 +436,10 @@
&msdu_info->htt, &msdu_info->tso_info,
NULL, vdev->opmode == wlan_op_mode_ocb);
- num_frags = cdf_nbuf_get_num_frags(msdu);
+ num_frags = qdf_nbuf_get_num_frags(msdu);
/* num_frags are expected to be 2 max */
- num_frags = (num_frags > NBUF_CB_TX_MAX_EXTRA_FRAGS)
- ? NBUF_CB_TX_MAX_EXTRA_FRAGS
+ num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
+ ? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
: num_frags;
#if defined(HELIUMPLUS_PADDR64)
/*
@@ -463,8 +463,8 @@
qdf_size_t frag_len;
qdf_dma_addr_t frag_paddr;
- frag_len = cdf_nbuf_get_frag_len(msdu, i);
- frag_paddr = cdf_nbuf_get_frag_paddr(msdu, i);
+ frag_len = qdf_nbuf_get_frag_len(msdu, i);
+ frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
#if defined(HELIUMPLUS_PADDR64)
htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
i - 1, frag_paddr, frag_len);
@@ -491,8 +491,8 @@
/*
* TODO : Can we remove this check and always download a fixed length ?
* */
- if (qdf_unlikely(cdf_nbuf_len(msdu) < pkt_download_len))
- pkt_download_len = cdf_nbuf_len(msdu);
+ if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
+ pkt_download_len = qdf_nbuf_len(msdu);
/* Fill the HTC header information */
/*
@@ -513,10 +513,10 @@
*
* Return: on success return NULL, pointer to nbuf when it fails to send.
*/
-cdf_nbuf_t
-ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+qdf_nbuf_t
+ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
- cdf_nbuf_t msdu = msdu_list;
+ qdf_nbuf_t msdu = msdu_list;
struct ol_txrx_pdev_t *pdev = vdev->pdev;
uint32_t pkt_download_len =
((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
@@ -532,11 +532,11 @@
* within the list.
*/
while (msdu) {
- cdf_nbuf_t next;
+ qdf_nbuf_t next;
struct ol_tx_desc_t *tx_desc;
int segments = 1;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
msdu_info.peer = NULL;
if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
@@ -553,7 +553,7 @@
* inside the ce_send_fast function, so store the next
* pointer before the ce_send call.
*/
- next = cdf_nbuf_next(msdu);
+ next = qdf_nbuf_next(msdu);
/* init the current segment to the 1st segment in the list */
while (segments) {
@@ -570,13 +570,13 @@
* receiving tx completion for all segments of an nbuf
*/
if (segments)
- cdf_nbuf_inc_users(msdu);
+ qdf_nbuf_inc_users(msdu);
msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
msdu_info.htt.info.vdev_id = vdev->vdev_id;
msdu_info.htt.action.cksum_offload =
- cdf_nbuf_get_tx_cksum(msdu);
- switch (cdf_nbuf_get_exemption_type(msdu)) {
+ qdf_nbuf_get_tx_cksum(msdu);
+ switch (qdf_nbuf_get_exemption_type(msdu)) {
case QDF_NBUF_EXEMPT_NO_EXEMPTION:
case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
/* We want to encrypt this frame */
@@ -619,7 +619,7 @@
}
if (msdu_info.tso_info.is_tso) {
- cdf_nbuf_reset_num_frags(msdu);
+ qdf_nbuf_reset_num_frags(msdu);
TXRX_STATS_TSO_INC_SEG(vdev->pdev);
TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
}
@@ -640,10 +640,10 @@
return NULL; /* all MSDUs were accepted */
}
#else
-cdf_nbuf_t
-ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+qdf_nbuf_t
+ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
- cdf_nbuf_t msdu = msdu_list;
+ qdf_nbuf_t msdu = msdu_list;
struct ol_txrx_pdev_t *pdev = vdev->pdev;
uint32_t pkt_download_len =
((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
@@ -660,17 +660,17 @@
* within the list.
*/
while (msdu) {
- cdf_nbuf_t next;
+ qdf_nbuf_t next;
struct ol_tx_desc_t *tx_desc;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
msdu_info.peer = NULL;
msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
msdu_info.htt.info.vdev_id = vdev->vdev_id;
msdu_info.htt.action.cksum_offload =
- cdf_nbuf_get_tx_cksum(msdu);
- switch (cdf_nbuf_get_exemption_type(msdu)) {
+ qdf_nbuf_get_tx_cksum(msdu);
+ switch (qdf_nbuf_get_exemption_type(msdu)) {
case QDF_NBUF_EXEMPT_NO_EXEMPTION:
case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
/* We want to encrypt this frame */
@@ -701,7 +701,7 @@
* inside the ce_send_fast function, so store the next
* pointer before the ce_send call.
*/
- next = cdf_nbuf_next(msdu);
+ next = qdf_nbuf_next(msdu);
if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
ep_id))) {
/* The packet could not be sent */
@@ -728,8 +728,8 @@
* ol_tx_ll_wrapper() wrapper to ol_tx_ll
*
*/
-static inline cdf_nbuf_t
-ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+static inline qdf_nbuf_t
+ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
struct hif_opaque_softc *hif_device =
(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
@@ -742,8 +742,8 @@
return msdu_list;
}
#else
-static inline cdf_nbuf_t
-ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+static inline qdf_nbuf_t
+ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
return ol_tx_ll(vdev, msdu_list);
}
@@ -778,17 +778,17 @@
max_to_accept = vdev->pdev->tx_desc.num_free -
OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
- cdf_nbuf_t tx_msdu;
+ qdf_nbuf_t tx_msdu;
max_to_accept--;
vdev->ll_pause.txq.depth--;
tx_msdu = vdev->ll_pause.txq.head;
if (tx_msdu) {
- vdev->ll_pause.txq.head = cdf_nbuf_next(tx_msdu);
+ vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
if (NULL == vdev->ll_pause.txq.head)
vdev->ll_pause.txq.tail = NULL;
- cdf_nbuf_set_next(tx_msdu, NULL);
- NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
- NBUF_TX_PKT_TXRX_DEQUEUE);
+ qdf_nbuf_set_next(tx_msdu, NULL);
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
+ QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
/*
* It is unexpected that ol_tx_ll would reject the frame
@@ -799,9 +799,9 @@
* For simplicity, just drop the frame.
*/
if (tx_msdu) {
- cdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
+ qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
QDF_DMA_TO_DEVICE);
- cdf_nbuf_tx_free(tx_msdu, NBUF_PKT_ERROR);
+ qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
}
}
}
@@ -817,33 +817,34 @@
qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}
-static cdf_nbuf_t
+static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t msdu_list, uint8_t start_timer)
+ qdf_nbuf_t msdu_list, uint8_t start_timer)
{
qdf_spin_lock_bh(&vdev->ll_pause.mutex);
while (msdu_list &&
vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
- cdf_nbuf_t next = cdf_nbuf_next(msdu_list);
- NBUF_UPDATE_TX_PKT_COUNT(msdu_list, NBUF_TX_PKT_TXRX_ENQUEUE);
+ qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
+ QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
DPTRACE(qdf_dp_trace(msdu_list,
QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
- (uint8_t *)(cdf_nbuf_data(msdu_list)),
- sizeof(cdf_nbuf_data(msdu_list))));
+ (uint8_t *)(qdf_nbuf_data(msdu_list)),
+ sizeof(qdf_nbuf_data(msdu_list))));
vdev->ll_pause.txq.depth++;
if (!vdev->ll_pause.txq.head) {
vdev->ll_pause.txq.head = msdu_list;
vdev->ll_pause.txq.tail = msdu_list;
} else {
- cdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
+ qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
}
vdev->ll_pause.txq.tail = msdu_list;
msdu_list = next;
}
if (vdev->ll_pause.txq.tail)
- cdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);
+ qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);
if (start_timer) {
qdf_timer_stop(&vdev->ll_pause.timer);
@@ -860,7 +861,7 @@
* Store up the tx frame in the vdev's tx queue if the vdev is paused.
* If there are too many frames in the tx queue, reject it.
*/
-cdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
uint16_t eth_type;
uint32_t paused_reason;
@@ -874,10 +875,10 @@
OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
paused_reason)) {
eth_type = (((struct ethernet_hdr_t *)
- cdf_nbuf_data(msdu_list))->
+ qdf_nbuf_data(msdu_list))->
ethertype[0] << 8) |
(((struct ethernet_hdr_t *)
- cdf_nbuf_data(msdu_list))->ethertype[1]);
+ qdf_nbuf_data(msdu_list))->ethertype[1]);
if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
return msdu_list;
@@ -918,7 +919,7 @@
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
int max_to_send; /* tracks how many frames have been sent */
- cdf_nbuf_t tx_msdu;
+ qdf_nbuf_t tx_msdu;
struct ol_txrx_vdev_t *vdev = NULL;
uint8_t more;
@@ -961,12 +962,12 @@
vdev->ll_pause.txq.depth--;
vdev->ll_pause.txq.head =
- cdf_nbuf_next(tx_msdu);
+ qdf_nbuf_next(tx_msdu);
if (NULL == vdev->ll_pause.txq.head)
vdev->ll_pause.txq.tail = NULL;
- cdf_nbuf_set_next(tx_msdu, NULL);
+ qdf_nbuf_set_next(tx_msdu, NULL);
tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
/*
* It is unexpected that ol_tx_ll would reject
@@ -978,10 +979,10 @@
* For simplicity, just drop the frame.
*/
if (tx_msdu) {
- cdf_nbuf_unmap(pdev->osdev, tx_msdu,
+ qdf_nbuf_unmap(pdev->osdev, tx_msdu,
QDF_DMA_TO_DEVICE);
- cdf_nbuf_tx_free(tx_msdu,
- NBUF_PKT_ERROR);
+ qdf_nbuf_tx_free(tx_msdu,
+ QDF_NBUF_PKT_ERROR);
}
}
/*check if there are more msdus to transmit */
@@ -1038,11 +1039,11 @@
return sub_type;
}
-cdf_nbuf_t
+qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
- enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
+ enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
- cdf_nbuf_t msdu = msdu_list;
+ qdf_nbuf_t msdu = msdu_list;
htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
struct ol_txrx_msdu_info_t msdu_info;
@@ -1056,10 +1057,10 @@
* within the list.
*/
while (msdu) {
- cdf_nbuf_t next;
+ qdf_nbuf_t next;
struct ol_tx_desc_t *tx_desc;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
msdu_info.peer = NULL;
msdu_info.tso_info.is_tso = 0;
@@ -1070,7 +1071,7 @@
* ol_tx_send function, so store the next pointer before the
* tx_send call.
*/
- next = cdf_nbuf_next(msdu);
+ next = qdf_nbuf_next(msdu);
if (tx_spec != ol_tx_spec_std) {
if (tx_spec & ol_tx_spec_no_free) {
@@ -1126,26 +1127,26 @@
* parse_ocb_tx_header() - Function to check for OCB
* TX control header on a packet and extract it if present
*
- * @msdu: Pointer to OS packet (cdf_nbuf_t)
+ * @msdu: Pointer to OS packet (qdf_nbuf_t)
*/
#define OCB_HEADER_VERSION 1
-bool parse_ocb_tx_header(cdf_nbuf_t msdu,
+bool parse_ocb_tx_header(qdf_nbuf_t msdu,
struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
struct ether_header *eth_hdr_p;
struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
/* Check if TX control header is present */
- eth_hdr_p = (struct ether_header *) cdf_nbuf_data(msdu);
+ eth_hdr_p = (struct ether_header *) qdf_nbuf_data(msdu);
if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
/* TX control header is not present. Nothing to do.. */
return true;
/* Remove the ethernet header */
- cdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
+ qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
/* Parse the TX control header */
- tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *) cdf_nbuf_data(msdu);
+ tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *) qdf_nbuf_data(msdu);
if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
if (tx_ctrl)
@@ -1157,13 +1158,13 @@
}
/* Remove the TX control header */
- cdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
+ qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
return true;
}
-cdf_nbuf_t
+qdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle vdev,
- enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
+ enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}
@@ -1230,7 +1231,7 @@
int
ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
- cdf_nbuf_t tx_mgmt_frm,
+ qdf_nbuf_t tx_mgmt_frm,
uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
{
struct ol_txrx_pdev_t *pdev = vdev->pdev;
@@ -1287,7 +1288,7 @@
tx_msdu_info.peer = NULL;
- cdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, QDF_DMA_TO_DEVICE);
/* For LL tx_comp_req is not used so initialized to 0 */
tx_msdu_info.htt.action.tx_comp_req = 0;
tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
@@ -1310,7 +1311,7 @@
htt_tx_desc_frags_table_set(
pdev->htt_pdev,
tx_desc->htt_tx_desc,
- cdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
+ qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
0, 0);
#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
dump_frag_desc(
@@ -1319,7 +1320,7 @@
#endif /* defined(HELIUMPLUS_PADDR64) */
}
if (!tx_desc) {
- cdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
+ qdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
QDF_DMA_TO_DEVICE);
return -EINVAL; /* can't accept the tx mgmt frame */
}
@@ -1328,7 +1329,9 @@
tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
- NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) = NBUF_TX_PKT_MGMT_TRACK; ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
+ QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
+ QDF_NBUF_TX_PKT_MGMT_TRACK;
+ ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
htt_pkt_type_mgmt);
return 0; /* accepted the tx mgmt frame */
@@ -1339,8 +1342,8 @@
htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
}
-cdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t msdu, uint16_t peer_id)
+qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
+ qdf_nbuf_t msdu, uint16_t peer_id)
{
struct ol_tx_desc_t *tx_desc;
struct ol_txrx_msdu_info_t msdu_info;
diff --git a/core/dp/txrx/ol_tx.h b/core/dp/txrx/ol_tx.h
index f0c0a7f..a76b829 100644
--- a/core/dp/txrx/ol_tx.h
+++ b/core/dp/txrx/ol_tx.h
@@ -32,18 +32,18 @@
#ifndef _OL_TX__H_
#define _OL_TX__H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_lock.h>
#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
#include <ol_txrx_types.h> /* ol_tx_desc_t, ol_txrx_msdu_info_t */
-cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list);
+qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
#ifdef WLAN_FEATURE_FASTPATH
-cdf_nbuf_t ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list);
+qdf_nbuf_t ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
#endif
-cdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list);
+qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
#define OL_TX_LL ol_tx_ll_queue
@@ -65,14 +65,14 @@
return;
}
#endif
-cdf_nbuf_t
+qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle data_vdev,
- enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list);
+ enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
-cdf_nbuf_t
-ol_tx_reinject(struct ol_txrx_vdev_t *vdev, cdf_nbuf_t msdu, uint16_t peer_id);
+qdf_nbuf_t
+ol_tx_reinject(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu, uint16_t peer_id);
-void ol_txrx_mgmt_tx_complete(void *ctxt, cdf_nbuf_t netbuf, int err);
+void ol_txrx_mgmt_tx_complete(void *ctxt, qdf_nbuf_t netbuf, int err);
#if defined(FEATURE_TSO)
diff --git a/core/dp/txrx/ol_tx_desc.c b/core/dp/txrx/ol_tx_desc.c
index a0a1b18..852829e 100644
--- a/core/dp/txrx/ol_tx_desc.c
+++ b/core/dp/txrx/ol_tx_desc.c
@@ -26,7 +26,7 @@
*/
#include <qdf_net_types.h> /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <qdf_util.h> /* qdf_assert */
#include <qdf_lock.h> /* cdf_spinlock */
#ifdef QCA_COMPUTE_TX_DELAY
@@ -307,12 +307,12 @@
dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);
void
-dump_pkt(cdf_nbuf_t nbuf, qdf_dma_addr_t nbuf_paddr, int len)
+dump_pkt(qdf_nbuf_t nbuf, qdf_dma_addr_t nbuf_paddr, int len)
{
qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
- cdf_nbuf_data(nbuf), nbuf_paddr, len);
+ qdf_nbuf_data(nbuf), nbuf_paddr, len);
print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
- cdf_nbuf_data(nbuf), len, true);
+ qdf_nbuf_data(nbuf), len, true);
}
const uint32_t htt_to_ce_pkt_type[] = {
@@ -326,7 +326,7 @@
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t netbuf,
+ qdf_nbuf_t netbuf,
struct ol_txrx_msdu_info_t *msdu_info)
{
struct ol_tx_desc_t *tx_desc;
@@ -334,8 +334,8 @@
uint32_t num_frags;
msdu_info->htt.info.vdev_id = vdev->vdev_id;
- msdu_info->htt.action.cksum_offload = cdf_nbuf_get_tx_cksum(netbuf);
- switch (cdf_nbuf_get_exemption_type(netbuf)) {
+ msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
+ switch (qdf_nbuf_get_exemption_type(netbuf)) {
case QDF_NBUF_EXEMPT_NO_EXEMPTION:
case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
/* We want to encrypt this frame */
@@ -379,10 +379,10 @@
* Skip the prefix fragment (HTT tx descriptor) that was added
* during the call to htt_tx_desc_init above.
*/
- num_frags = cdf_nbuf_get_num_frags(netbuf);
+ num_frags = qdf_nbuf_get_num_frags(netbuf);
/* num_frags are expected to be 2 max */
- num_frags = (num_frags > NBUF_CB_TX_MAX_EXTRA_FRAGS)
- ? NBUF_CB_TX_MAX_EXTRA_FRAGS
+ num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
+ ? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
: num_frags;
#if defined(HELIUMPLUS_PADDR64)
/*
@@ -408,10 +408,10 @@
qdf_dma_addr_t frag_paddr;
#ifdef HELIUMPLUS_DEBUG
void *frag_vaddr;
- frag_vaddr = cdf_nbuf_get_frag_vaddr(netbuf, i);
+ frag_vaddr = qdf_nbuf_get_frag_vaddr(netbuf, i);
#endif
- frag_len = cdf_nbuf_get_frag_len(netbuf, i);
- frag_paddr = cdf_nbuf_get_frag_paddr(netbuf, i);
+ frag_len = qdf_nbuf_get_frag_len(netbuf, i);
+ frag_paddr = qdf_nbuf_get_frag_paddr(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
frag_paddr, frag_len);
@@ -438,25 +438,25 @@
ol_tx_desc_list *tx_descs, int had_error)
{
struct ol_tx_desc_t *tx_desc, *tmp;
- cdf_nbuf_t msdus = NULL;
+ qdf_nbuf_t msdus = NULL;
TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
- cdf_nbuf_t msdu = tx_desc->netbuf;
+ qdf_nbuf_t msdu = tx_desc->netbuf;
qdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
/* restore original hdr offset */
OL_TX_RESTORE_HDR(tx_desc, msdu);
#endif
- cdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_TO_DEVICE);
/* free the tx desc */
ol_tx_desc_free(pdev, tx_desc);
/* link the netbuf into a list to free as a batch */
- cdf_nbuf_set_next(msdu, msdus);
+ qdf_nbuf_set_next(msdu, msdus);
msdus = msdu;
}
/* free the netbufs as a batch */
- cdf_nbuf_tx_free(msdus, had_error);
+ qdf_nbuf_tx_free(msdus, had_error);
}
void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
@@ -472,11 +472,11 @@
OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
#endif
trace_str = (had_error) ? "OT:C:F:" : "OT:C:S:";
- cdf_nbuf_trace_update(tx_desc->netbuf, trace_str);
+ qdf_nbuf_trace_update(tx_desc->netbuf, trace_str);
if (tx_desc->pkt_type == ol_tx_frm_no_free) {
/* free the tx desc but don't unmap or free the frame */
if (pdev->tx_data_callback.func) {
- cdf_nbuf_set_next(tx_desc->netbuf, NULL);
+ qdf_nbuf_set_next(tx_desc->netbuf, NULL);
pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
tx_desc->netbuf, had_error);
ol_tx_desc_free(pdev, tx_desc);
@@ -484,7 +484,7 @@
}
/* let the code below unmap and free the frame */
}
- cdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
/* check the frame type to see what kind of special steps are needed */
if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
(tx_desc->pkt_type != 0xff)) {
@@ -524,11 +524,11 @@
ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
}
/* free the netbuf */
- cdf_nbuf_free(tx_desc->netbuf);
+ qdf_nbuf_free(tx_desc->netbuf);
} else {
/* single regular frame */
- cdf_nbuf_set_next(tx_desc->netbuf, NULL);
- cdf_nbuf_tx_free(tx_desc->netbuf, had_error);
+ qdf_nbuf_set_next(tx_desc->netbuf, NULL);
+ qdf_nbuf_tx_free(tx_desc->netbuf, had_error);
}
/* free the tx desc */
ol_tx_desc_free(pdev, tx_desc);
diff --git a/core/dp/txrx/ol_tx_desc.h b/core/dp/txrx/ol_tx_desc.h
index fa154f8..4563251 100644
--- a/core/dp/txrx/ol_tx_desc.h
+++ b/core/dp/txrx/ol_tx_desc.h
@@ -33,7 +33,7 @@
#define _OL_TX_DESC__H_
#include <cds_queue.h> /* TAILQ_HEAD */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_txrx_types.h> /* ol_tx_desc_t */
#include <ol_txrx_internal.h> /*TXRX_ASSERT2 */
@@ -63,7 +63,7 @@
*/
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t netbuf,
+ qdf_nbuf_t netbuf,
struct ol_txrx_msdu_info_t *msdu_info);
/**
diff --git a/core/dp/txrx/ol_tx_queue.c b/core/dp/txrx/ol_tx_queue.c
index 99f3584..5c25a40 100644
--- a/core/dp/txrx/ol_tx_queue.c
+++ b/core/dp/txrx/ol_tx_queue.c
@@ -25,7 +25,7 @@
* to the Linux Foundation.
*/
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h> /* qdf_atomic_read, etc. */
#include <ol_cfg.h> /* ol_cfg_addba_retry */
#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
@@ -91,14 +91,14 @@
qdf_timer_stop(&vdev->ll_pause.timer);
vdev->ll_pause.is_q_timer_on = false;
while (vdev->ll_pause.txq.head) {
- cdf_nbuf_t next =
- cdf_nbuf_next(vdev->ll_pause.txq.head);
- cdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
- cdf_nbuf_unmap(vdev->pdev->osdev,
+ qdf_nbuf_t next =
+ qdf_nbuf_next(vdev->ll_pause.txq.head);
+ qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
+ qdf_nbuf_unmap(vdev->pdev->osdev,
vdev->ll_pause.txq.head,
QDF_DMA_TO_DEVICE);
- cdf_nbuf_tx_free(vdev->ll_pause.txq.head,
- NBUF_PKT_ERROR);
+ qdf_nbuf_tx_free(vdev->ll_pause.txq.head,
+ QDF_NBUF_PKT_ERROR);
vdev->ll_pause.txq.head = next;
}
vdev->ll_pause.txq.tail = NULL;
diff --git a/core/dp/txrx/ol_tx_queue.h b/core/dp/txrx/ol_tx_queue.h
index 5da4b9f..ce847b7 100644
--- a/core/dp/txrx/ol_tx_queue.h
+++ b/core/dp/txrx/ol_tx_queue.h
@@ -32,7 +32,7 @@
#ifndef _OL_TX_QUEUE__H_
#define _OL_TX_QUEUE__H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
#include <qdf_types.h> /* bool */
diff --git a/core/dp/txrx/ol_tx_send.c b/core/dp/txrx/ol_tx_send.c
index ae2d77a..785938b 100644
--- a/core/dp/txrx/ol_tx_send.c
+++ b/core/dp/txrx/ol_tx_send.c
@@ -28,7 +28,7 @@
#include <qdf_atomic.h> /* qdf_atomic_inc, etc. */
#include <qdf_lock.h> /* cdf_os_spinlock */
#include <qdf_time.h> /* qdf_system_ticks, etc. */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_net_types.h> /* QDF_NBUF_TX_EXT_TID_INVALID */
#include <cds_queue.h> /* TAILQ */
@@ -133,15 +133,15 @@
static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
- struct ol_tx_desc_t *tx_desc, cdf_nbuf_t msdu)
+ struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
int msdu_credit_consumed;
- TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", cdf_nbuf_len(msdu));
+ TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
qdf_atomic_read(&pdev->target_tx_credit),
qdf_atomic_read(&pdev->target_tx_credit) - 1,
- cdf_nbuf_len(msdu));
+ qdf_nbuf_len(msdu));
msdu_credit_consumed = htt_tx_msdu_credit(msdu);
OL_TX_TARGET_CREDIT_DECR_INT(pdev, msdu_credit_consumed);
@@ -174,7 +174,7 @@
void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
- struct ol_tx_desc_t *tx_desc, cdf_nbuf_t msdu)
+ struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
int msdu_credit_consumed;
uint16_t id;
@@ -182,10 +182,10 @@
msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
id = ol_tx_desc_id(pdev, tx_desc);
- NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_TXRX);
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
- (uint8_t *)(cdf_nbuf_data(msdu)),
- sizeof(cdf_nbuf_data(msdu))));
+ (uint8_t *)(qdf_nbuf_data(msdu)),
+ sizeof(qdf_nbuf_data(msdu))));
failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
if (qdf_unlikely(failed)) {
OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
@@ -195,18 +195,18 @@
void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
- cdf_nbuf_t head_msdu, int num_msdus)
+ qdf_nbuf_t head_msdu, int num_msdus)
{
- cdf_nbuf_t rejected;
+ qdf_nbuf_t rejected;
OL_TX_CREDIT_RECLAIM(pdev);
rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
while (qdf_unlikely(rejected)) {
struct ol_tx_desc_t *tx_desc;
uint16_t *msdu_id_storage;
- cdf_nbuf_t next;
+ qdf_nbuf_t next;
- next = cdf_nbuf_next(rejected);
+ next = qdf_nbuf_next(rejected);
msdu_id_storage = ol_tx_msdu_id_storage(rejected);
tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);
@@ -220,7 +220,7 @@
void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
struct ol_tx_desc_t *tx_desc,
- cdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
+ qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
int msdu_credit_consumed;
uint16_t id;
@@ -228,7 +228,7 @@
msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
id = ol_tx_desc_id(pdev, tx_desc);
- NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_TXRX);
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
if (failed) {
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
@@ -240,7 +240,7 @@
static inline void
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
- A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
+ A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
struct ol_tx_desc_t *tx_desc;
@@ -283,7 +283,7 @@
void
ol_tx_download_done_ll(void *pdev,
- A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
+ A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
msdu_id);
@@ -292,7 +292,7 @@
void
ol_tx_download_done_hl_retain(void *txrx_pdev,
A_STATUS status,
- cdf_nbuf_t msdu, uint16_t msdu_id)
+ qdf_nbuf_t msdu, uint16_t msdu_id)
{
struct ol_txrx_pdev_t *pdev = txrx_pdev;
ol_tx_download_done_base(pdev, status, msdu, msdu_id);
@@ -300,7 +300,7 @@
void
ol_tx_download_done_hl_free(void *txrx_pdev,
- A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
+ A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
struct ol_txrx_pdev_t *pdev = txrx_pdev;
struct ol_tx_desc_t *tx_desc;
@@ -363,8 +363,8 @@
qdf_atomic_init(&(_tx_desc)->ref_cnt); \
/* restore orginal hdr offset */ \
OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
- cdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
- cdf_nbuf_free((_netbuf)); \
+ qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
+ qdf_nbuf_free((_netbuf)); \
((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
(_lcl_freelist); \
if (qdf_unlikely(!lcl_freelist)) { \
@@ -379,8 +379,8 @@
do { \
/* restore orginal hdr offset */ \
OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
- cdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
- cdf_nbuf_free((_netbuf)); \
+ qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
+ qdf_nbuf_free((_netbuf)); \
((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
(_lcl_freelist); \
if (qdf_unlikely(!lcl_freelist)) { \
@@ -488,7 +488,7 @@
char *trace_str;
uint32_t byte_cnt = 0;
- cdf_nbuf_t netbuf;
+ qdf_nbuf_t netbuf;
union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
@@ -505,9 +505,9 @@
netbuf = tx_desc->netbuf;
qdf_runtime_pm_put();
- cdf_nbuf_trace_update(netbuf, trace_str);
+ qdf_nbuf_trace_update(netbuf, trace_str);
/* Per SDU update of byte count */
- byte_cnt += cdf_nbuf_len(netbuf);
+ byte_cnt += qdf_nbuf_len(netbuf);
if (OL_TX_DESC_NO_REFS(tx_desc)) {
ol_tx_statistics(
pdev->ctrl_pdev,
@@ -518,7 +518,7 @@
ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
lcl_freelist, tx_desc_last, status);
}
- NBUF_UPDATE_TX_PKT_COUNT(netbuf, NBUF_TX_PKT_FREE);
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
tx_desc->pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
@@ -561,15 +561,15 @@
enum htt_tx_status status, uint16_t tx_desc_id)
{
struct ol_tx_desc_t *tx_desc;
- cdf_nbuf_t netbuf;
+ qdf_nbuf_t netbuf;
tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
tx_desc->status = status;
netbuf = tx_desc->netbuf;
- NBUF_UPDATE_TX_PKT_COUNT(netbuf, NBUF_TX_PKT_FREE);
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
/* Do one shot statistics */
- TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, cdf_nbuf_len(netbuf));
+ TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));
if (OL_TX_DESC_NO_REFS(tx_desc)) {
ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
@@ -599,7 +599,7 @@
struct ol_tx_desc_t *tx_desc;
union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
- cdf_nbuf_t netbuf;
+ qdf_nbuf_t netbuf;
ol_tx_desc_list tx_descs;
TAILQ_INIT(&tx_descs);
@@ -760,10 +760,10 @@
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
static inline uint8_t *ol_tx_dest_addr_find(struct ol_txrx_pdev_t *pdev,
- cdf_nbuf_t tx_nbuf)
+ qdf_nbuf_t tx_nbuf)
{
uint8_t *hdr_ptr;
- void *datap = cdf_nbuf_data(tx_nbuf);
+ void *datap = qdf_nbuf_data(tx_nbuf);
if (pdev->frame_format == wlan_frm_fmt_raw) {
/* adjust hdr_ptr to RA */
@@ -787,7 +787,7 @@
static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
- cdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
+ qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
uint16_t ethertype;
uint8_t *dest_addr, *l3_hdr;
@@ -810,14 +810,14 @@
if (pdev->frame_format == wlan_frm_fmt_802_3) {
struct ethernet_hdr_t *enet_hdr;
- enet_hdr = (struct ethernet_hdr_t *)cdf_nbuf_data(msdu);
+ enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
l2_hdr_size = sizeof(struct ethernet_hdr_t);
ethertype =
(enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
if (!IS_ETHERTYPE(ethertype)) {
struct llc_snap_hdr_t *llc_hdr;
llc_hdr = (struct llc_snap_hdr_t *)
- (cdf_nbuf_data(msdu) + l2_hdr_size);
+ (qdf_nbuf_data(msdu) + l2_hdr_size);
l2_hdr_size += sizeof(struct llc_snap_hdr_t);
ethertype =
(llc_hdr->ethertype[0] << 8) | llc_hdr->
@@ -826,13 +826,13 @@
} else {
struct llc_snap_hdr_t *llc_hdr;
l2_hdr_size = sizeof(struct ieee80211_frame);
- llc_hdr = (struct llc_snap_hdr_t *)(cdf_nbuf_data(msdu)
+ llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu)
+ l2_hdr_size);
l2_hdr_size += sizeof(struct llc_snap_hdr_t);
ethertype =
(llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
}
- l3_hdr = cdf_nbuf_data(msdu) + l2_hdr_size;
+ l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size;
if (ETHERTYPE_IPV4 == ethertype) {
return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
} else if (ETHERTYPE_IPV6 == ethertype) {
@@ -850,8 +850,8 @@
struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
uint8_t tid;
- cdf_nbuf_t msdu = tx_desc->netbuf;
- tid = cdf_nbuf_get_tid(msdu);
+ qdf_nbuf_t msdu = tx_desc->netbuf;
+ tid = qdf_nbuf_get_tid(msdu);
if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
diff --git a/core/dp/txrx/ol_tx_send.h b/core/dp/txrx/ol_tx_send.h
index db48812..2260da9 100644
--- a/core/dp/txrx/ol_tx_send.h
+++ b/core/dp/txrx/ol_tx_send.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -32,7 +32,7 @@
#ifndef _OL_TX_SEND__H_
#define _OL_TX_SEND__H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_txrx_types.h> /* ol_tx_send_t */
/**
@@ -52,7 +52,7 @@
*/
void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
- struct ol_tx_desc_t *tx_desc, cdf_nbuf_t msdu);
+ struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu);
/**
* @brief Send a tx batch download to the target.
@@ -67,7 +67,7 @@
int
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
- cdf_nbuf_t msdu_list, int num_msdus);
+ qdf_nbuf_t msdu_list, int num_msdus);
/**
* @brief Send a tx frame with a non-std header or payload type to the target.
@@ -82,5 +82,5 @@
void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
struct ol_tx_desc_t *tx_desc,
- cdf_nbuf_t msdu, enum htt_pkt_type pkt_type);
+ qdf_nbuf_t msdu, enum htt_pkt_type pkt_type);
#endif /* _OL_TX_SEND__H_ */
diff --git a/core/dp/txrx/ol_txrx.c b/core/dp/txrx/ol_txrx.c
index 208548f..7a0629b 100644
--- a/core/dp/txrx/ol_txrx.c
+++ b/core/dp/txrx/ol_txrx.c
@@ -30,7 +30,7 @@
#include <osdep.h> /* uint32_t, etc. */
#include <qdf_mem.h> /* qdf_mem_malloc,free */
#include <qdf_types.h> /* qdf_device_t, qdf_print */
-#include <qdf_lock.h> /* cdf_spinlock */
+#include <qdf_lock.h> /* qdf_spinlock */
#include <qdf_atomic.h> /* qdf_atomic_read */
/* Required for WLAN_FEATURE_FASTPATH */
@@ -1190,11 +1190,11 @@
qdf_timer_free(&vdev->ll_pause.timer);
vdev->ll_pause.is_q_timer_on = false;
while (vdev->ll_pause.txq.head) {
- cdf_nbuf_t next = cdf_nbuf_next(vdev->ll_pause.txq.head);
- cdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
- cdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
+ qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
+ qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
+ qdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
QDF_DMA_TO_DEVICE);
- cdf_nbuf_tx_free(vdev->ll_pause.txq.head, NBUF_PKT_ERROR);
+ qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
vdev->ll_pause.txq.head = next;
}
qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
@@ -1287,12 +1287,12 @@
list_del(&cache_buf->list);
qdf_spin_unlock_bh(&peer->bufq_lock);
if (drop) {
- cdf_nbuf_free(cache_buf->buf);
+ qdf_nbuf_free(cache_buf->buf);
} else {
/* Flush the cached frames to HDD */
ret = data_rx(cds_ctx, cache_buf->buf, peer->local_id);
if (ret != QDF_STATUS_SUCCESS)
- cdf_nbuf_free(cache_buf->buf);
+ qdf_nbuf_free(cache_buf->buf);
}
qdf_mem_free(cache_buf);
qdf_spin_lock_bh(&peer->bufq_lock);
@@ -2939,7 +2939,7 @@
ol_tx_dump_flow_pool_info();
break;
case WLAN_TXRX_DESC_STATS:
- cdf_nbuf_tx_desc_count_display();
+ qdf_nbuf_tx_desc_count_display();
break;
default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
@@ -2967,7 +2967,7 @@
ol_tx_clear_flow_pool_stats();
break;
case WLAN_TXRX_DESC_STATS:
- cdf_nbuf_tx_desc_count_clear();
+ qdf_nbuf_tx_desc_count_clear();
break;
default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
@@ -2984,10 +2984,10 @@
* Return: None
*/
static void ol_rx_data_cb(struct ol_txrx_peer_t *peer,
- cdf_nbuf_t buf_list)
+ qdf_nbuf_t buf_list)
{
void *cds_ctx = cds_get_global_context();
- cdf_nbuf_t buf, next_buf;
+ qdf_nbuf_t buf, next_buf;
QDF_STATUS ret;
ol_rx_callback_fp data_rx = NULL;
@@ -3012,12 +3012,12 @@
buf = buf_list;
while (buf) {
- next_buf = cdf_nbuf_queue_next(buf);
- cdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
+ next_buf = qdf_nbuf_queue_next(buf);
+ qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
ret = data_rx(cds_ctx, buf, peer->local_id);
if (ret != QDF_STATUS_SUCCESS) {
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Frame Rx to HDD failed");
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
buf = next_buf;
}
@@ -3027,8 +3027,8 @@
TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%s:Dropping frames", __func__);
buf = buf_list;
while (buf) {
- next_buf = cdf_nbuf_queue_next(buf);
- cdf_nbuf_free(buf);
+ next_buf = qdf_nbuf_queue_next(buf);
+ qdf_nbuf_free(buf);
buf = next_buf;
}
}
@@ -3041,12 +3041,12 @@
* Return: None
*/
void ol_rx_data_process(struct ol_txrx_peer_t *peer,
- cdf_nbuf_t rx_buf_list)
+ qdf_nbuf_t rx_buf_list)
{
/* Firmware data path active response will use shim RX thread
* T2H MSG running on SIRQ context,
* IPA kernel module API should not be called on SIRQ CTXT */
- cdf_nbuf_t buf, next_buf;
+ qdf_nbuf_t buf, next_buf;
ol_rx_callback_fp data_rx = NULL;
ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
@@ -3069,15 +3069,15 @@
struct ol_rx_cached_buf *cache_buf;
buf = rx_buf_list;
while (buf) {
- next_buf = cdf_nbuf_queue_next(buf);
+ next_buf = qdf_nbuf_queue_next(buf);
cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
if (!cache_buf) {
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
"Failed to allocate buf to cache the rx frames");
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
} else {
/* Add NULL terminator */
- cdf_nbuf_set_next(buf, NULL);
+ qdf_nbuf_set_next(buf, NULL);
cache_buf->buf = buf;
qdf_spin_lock_bh(&peer->bufq_lock);
list_add_tail(&cache_buf->list,
@@ -3126,8 +3126,8 @@
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Dropping rx packets");
buf = rx_buf_list;
while (buf) {
- next_buf = cdf_nbuf_queue_next(buf);
- cdf_nbuf_free(buf);
+ next_buf = qdf_nbuf_queue_next(buf);
+ qdf_nbuf_free(buf);
buf = next_buf;
}
}
@@ -3137,7 +3137,7 @@
* @rxcb: rx callback
* @sta_desc: sta descriptor
*
- * Return: CDF Status
+ * Return: QDF Status
*/
QDF_STATUS ol_txrx_register_peer(ol_rx_callback_fp rxcb,
struct ol_txrx_desc_type *sta_desc)
@@ -3187,7 +3187,7 @@
* ol_txrx_clear_peer() - clear peer
* @sta_id: sta id
*
- * Return: CDF Status
+ * Return: QDF Status
*/
QDF_STATUS ol_txrx_clear_peer(uint8_t sta_id)
{
@@ -3320,7 +3320,7 @@
* ol_txrx_register_pause_cb() - register pause callback
* @pause_cb: pause callback
*
- * Return: CDF status
+ * Return: QDF status
*/
QDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
{
diff --git a/core/dp/txrx/ol_txrx.h b/core/dp/txrx/ol_txrx.h
index 760850f..2247238 100644
--- a/core/dp/txrx/ol_txrx.h
+++ b/core/dp/txrx/ol_txrx.h
@@ -28,7 +28,7 @@
#ifndef _OL_TXRX__H_
#define _OL_TXRX__H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
#include <ol_ctrl_api.h> /* ol_pdev_handle */
#include "cds_sched.h"
diff --git a/core/dp/txrx/ol_txrx_encap.c b/core/dp/txrx/ol_txrx_encap.c
index 8deddeb..03b1ddd 100644
--- a/core/dp/txrx/ol_txrx_encap.c
+++ b/core/dp/txrx/ol_txrx_encap.c
@@ -35,7 +35,7 @@
*/
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <cds_ieee80211_common.h> /* ieee80211_frame */
#include <net.h> /* struct llc, struct ether_header, etc. */
#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
@@ -44,14 +44,14 @@
#define OL_TX_COPY_NATIVE_WIFI_HEADER(wh, msdu, hdsize, localbuf) \
do { \
- wh = (struct ieee80211_frame *)cdf_nbuf_data(msdu); \
+ wh = (struct ieee80211_frame *)qdf_nbuf_data(msdu); \
if ((wh->i_fc[1] & \
IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) { \
hdsize = sizeof(struct ieee80211_frame_addr4); \
} else { \
hdsize = sizeof(struct ieee80211_frame); \
} \
- if (cdf_nbuf_len(msdu) < hdsize) { \
+ if (qdf_nbuf_len(msdu) < hdsize) { \
return A_ERROR; \
} \
qdf_mem_copy(localbuf, wh, hdsize); \
@@ -59,18 +59,18 @@
} while (0)
static inline A_STATUS
-ol_tx_copy_native_wifi_header(cdf_nbuf_t msdu,
+ol_tx_copy_native_wifi_header(qdf_nbuf_t msdu,
uint8_t *hdsize, uint8_t *localbuf)
{
struct ieee80211_frame *wh =
- (struct ieee80211_frame *)cdf_nbuf_data(msdu);
+ (struct ieee80211_frame *)qdf_nbuf_data(msdu);
if ((wh->i_fc[1] &
IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) {
*hdsize = sizeof(struct ieee80211_frame_addr4);
} else {
*hdsize = sizeof(struct ieee80211_frame);
}
- if (cdf_nbuf_len(msdu) < *hdsize)
+ if (qdf_nbuf_len(msdu) < *hdsize)
return A_ERROR;
qdf_mem_copy(localbuf, wh, *hdsize);
@@ -80,7 +80,7 @@
static inline A_STATUS
ol_tx_encap_from_native_wifi(struct ol_txrx_vdev_t *vdev,
struct ol_tx_desc_t *tx_desc,
- cdf_nbuf_t msdu,
+ qdf_nbuf_t msdu,
struct ol_txrx_msdu_info_t *tx_msdu_info)
{
uint8_t localbuf[sizeof(struct ieee80211_qosframe_htc_addr4)];
@@ -127,7 +127,7 @@
htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,
new_hdsize), localbuf,
new_hdsize);
- cdf_nbuf_pull_head(msdu, hdsize);
+ qdf_nbuf_pull_head(msdu, hdsize);
tx_msdu_info->htt.info.l3_hdr_offset = new_hdsize;
tx_desc->orig_l2_hdr_bytes = hdsize;
}
@@ -148,7 +148,7 @@
htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,
hdsize);
qdf_mem_copy((void *)wh, localbuf, hdsize);
- cdf_nbuf_pull_head(msdu, hdsize);
+ qdf_nbuf_pull_head(msdu, hdsize);
tx_msdu_info->htt.info.l3_hdr_offset = hdsize;
tx_desc->orig_l2_hdr_bytes = hdsize;
}
@@ -160,7 +160,7 @@
static inline A_STATUS
ol_tx_encap_from_8023(struct ol_txrx_vdev_t *vdev,
struct ol_tx_desc_t *tx_desc,
- cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *tx_msdu_info)
+ qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *tx_msdu_info)
{
uint8_t localbuf[sizeof(struct ieee80211_qosframe_htc_addr4)
+ sizeof(struct llc_snap_hdr_t)];
@@ -183,7 +183,7 @@
*/
peer = tx_msdu_info->peer;
- eth_hdr = (struct ethernet_hdr_t *)cdf_nbuf_data(msdu);
+ eth_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
hdsize = sizeof(struct ethernet_hdr_t);
wh = (struct ieee80211_frame *)localbuf;
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
@@ -277,7 +277,7 @@
htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,
new_l2_hdsize), localbuf,
new_hdsize);
- cdf_nbuf_pull_head(msdu, hdsize);
+ qdf_nbuf_pull_head(msdu, hdsize);
tx_msdu_info->htt.info.l3_hdr_offset = new_l2_hdsize;
tx_desc->orig_l2_hdr_bytes = hdsize;
return A_OK;
@@ -286,7 +286,7 @@
A_STATUS
ol_tx_encap(struct ol_txrx_vdev_t *vdev,
struct ol_tx_desc_t *tx_desc,
- cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
+ qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
struct ol_txrx_pdev_t *pdev = vdev->pdev;
@@ -303,7 +303,7 @@
static inline void
ol_rx_decap_to_native_wifi(struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t msdu,
+ qdf_nbuf_t msdu,
struct ol_rx_decap_info_t *info,
struct ethernet_hdr_t *ethr_hdr)
{
@@ -322,7 +322,7 @@
else
hdsize = sizeof(struct ieee80211_frame);
- wh = (struct ieee80211_frame_addr4 *)cdf_nbuf_push_head(msdu, hdsize);
+ wh = (struct ieee80211_frame_addr4 *)qdf_nbuf_push_head(msdu, hdsize);
TXRX_ASSERT2(wh != NULL);
TXRX_ASSERT2(hdsize <= info->hdr_len);
qdf_mem_copy((uint8_t *) wh, info->hdr, hdsize);
@@ -365,7 +365,7 @@
static inline void
ol_rx_decap_to_8023(struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t msdu,
+ qdf_nbuf_t msdu,
struct ol_rx_decap_info_t *info,
struct ethernet_hdr_t *ethr_hdr)
{
@@ -381,7 +381,7 @@
* if ethr_hdr is null, rx frame is 802.11 format(HW ft disabled)
* if ethr_hdr is not null, rx frame is "subfrm of amsdu".
*/
- buf = (uint8_t *) cdf_nbuf_data(msdu);
+ buf = (uint8_t *) qdf_nbuf_data(msdu);
llc_hdr = (struct llc_snap_hdr_t *)buf;
ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
/* do llc remove if needed */
@@ -401,9 +401,9 @@
}
}
if (l2_hdr_space > ETHERNET_HDR_LEN)
- buf = cdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
+ buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
else if (l2_hdr_space < ETHERNET_HDR_LEN)
- buf = cdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
+ buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
/* normal msdu(non-subfrm of A-MSDU) if ethr_hdr is null */
if (ethr_hdr == NULL) {
@@ -444,10 +444,10 @@
ethr_hdr->ethertype[1] = (ether_type) & 0xff;
} else {
uint32_t pktlen =
- cdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype);
+ qdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype);
TXRX_ASSERT2(pktlen <= ETHERNET_MTU);
ether_type = (uint16_t) pktlen;
- ether_type = cdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
+ ether_type = qdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
ethr_hdr->ethertype[1] = (ether_type) & 0xff;
}
@@ -456,18 +456,18 @@
static inline A_STATUS
ol_rx_decap_subfrm_amsdu(struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
+ qdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
{
struct ol_txrx_pdev_t *pdev = vdev->pdev;
uint8_t *subfrm_hdr;
uint8_t localbuf[ETHERNET_HDR_LEN];
struct ethernet_hdr_t *ether_hdr = (struct ethernet_hdr_t *)localbuf;
- subfrm_hdr = (uint8_t *) cdf_nbuf_data(msdu);
+ subfrm_hdr = (uint8_t *) qdf_nbuf_data(msdu);
if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
/* decap to native wifi */
qdf_mem_copy(ether_hdr, subfrm_hdr, ETHERNET_HDR_LEN);
- cdf_nbuf_pull_head(msdu, ETHERNET_HDR_LEN);
+ qdf_nbuf_pull_head(msdu, ETHERNET_HDR_LEN);
ol_rx_decap_to_native_wifi(vdev, msdu, info, ether_hdr);
} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
if (pdev->sw_rx_llc_proc_enable) {
@@ -475,7 +475,7 @@
* 802.11 table P-3
*/
qdf_mem_copy(ether_hdr, subfrm_hdr, ETHERNET_HDR_LEN);
- cdf_nbuf_pull_head(msdu, ETHERNET_HDR_LEN);
+ qdf_nbuf_pull_head(msdu, ETHERNET_HDR_LEN);
ol_rx_decap_to_8023(vdev, msdu, info, ether_hdr);
} else {
/* subfrm of A-MSDU is already in 802.3 format.
@@ -492,11 +492,11 @@
static inline A_STATUS
ol_rx_decap_msdu(struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
+ qdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
{
struct ol_txrx_pdev_t *pdev = vdev->pdev;
struct ieee80211_frame *wh;
- wh = (struct ieee80211_frame *)cdf_nbuf_data(msdu);
+ wh = (struct ieee80211_frame *)qdf_nbuf_data(msdu);
if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
/* Decap to native wifi because according to MSFT(
@@ -509,7 +509,7 @@
TXRX_ASSERT2(info->hdr_len <= sizeof(info->hdr));
qdf_mem_copy(info->hdr, /* use info->hdr as temp buf. */
wh, info->hdr_len);
- cdf_nbuf_pull_head(msdu, info->hdr_len);
+ qdf_nbuf_pull_head(msdu, info->hdr_len);
ol_rx_decap_to_native_wifi(vdev, msdu, info, NULL);
/* 802.11 hdr^ eth_hdr^ */
}
@@ -519,7 +519,7 @@
TXRX_ASSERT2(info->hdr_len <= sizeof(info->hdr));
qdf_mem_copy(info->hdr, /* use info->hdr as temp buf. */
wh, info->hdr_len);
- cdf_nbuf_pull_head(msdu, info->hdr_len);
+ qdf_nbuf_pull_head(msdu, info->hdr_len);
/* remove llc snap hdr if it's necessary according to
* 802.11 table P-3
*/
@@ -541,7 +541,7 @@
A_STATUS
ol_rx_decap(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
+ qdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
{
A_STATUS status;
uint8_t *mpdu_hdr;
@@ -559,14 +559,14 @@
* subsequent subfrm 802.11 header recovery
* in certain chip(such as Riva).
*/
- mpdu_hdr = cdf_nbuf_data(msdu);
+ mpdu_hdr = qdf_nbuf_data(msdu);
info->hdr_len =
ol_txrx_ieee80211_hdrsize(mpdu_hdr);
TXRX_ASSERT2(info->hdr_len <=
sizeof(info->hdr));
qdf_mem_copy(info->hdr, mpdu_hdr,
info->hdr_len);
- cdf_nbuf_pull_head(msdu, info->hdr_len);
+ qdf_nbuf_pull_head(msdu, info->hdr_len);
}
}
}
diff --git a/core/dp/txrx/ol_txrx_encap.h b/core/dp/txrx/ol_txrx_encap.h
index 5795b3b..2da28be 100644
--- a/core/dp/txrx/ol_txrx_encap.h
+++ b/core/dp/txrx/ol_txrx_encap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -34,7 +34,7 @@
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <cds_ieee80211_common.h> /* ieee80211_qosframe_htc_addr4 */
#include <ol_txrx_types.h> /* ol_tx_desc_t, ol_txrx_msdu_info_t */
@@ -48,7 +48,7 @@
* with or without QOS control field based on peer's QOS capabilites.
* @param vdev - handle to vdev object
* @param tx_desc - tx desc struct,some fields will be updated.
- * @param msdu - cdf_nbuf_t
+ * @param msdu - qdf_nbuf_t
* @param msdu_info - informations from tx classification.
* @return
* A_OK: encap operation sucessful
@@ -57,7 +57,7 @@
A_STATUS
ol_tx_encap(struct ol_txrx_vdev_t *vdev,
struct ol_tx_desc_t *tx_desc,
- cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info);
+ qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info);
struct ol_rx_decap_info_t {
uint8_t hdr[sizeof(struct ieee80211_qosframe_htc_addr4)];
@@ -75,7 +75,7 @@
* if Target haven't done that.
* @param vdev - handle to vdev object
* @param peer - the peer object.
- * @param msdu - cdf_nbuf_t
+ * @param msdu - qdf_nbuf_t
* @param info - ol_rx_decap_info_t: context info for decap
* @return
* A_OK: decap operation sucessful
@@ -84,12 +84,12 @@
A_STATUS
ol_rx_decap(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info);
+ qdf_nbuf_t msdu, struct ol_rx_decap_info_t *info);
static inline A_STATUS
OL_TX_ENCAP(struct ol_txrx_vdev_t *vdev,
struct ol_tx_desc_t *tx_desc,
- cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
+ qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
if (vdev->pdev->sw_tx_encap)
return ol_tx_encap(vdev, tx_desc, msdu, msdu_info);
@@ -99,7 +99,7 @@
static inline A_STATUS
OL_RX_DECAP(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
+ qdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
{
if (vdev->pdev->sw_rx_decap)
return ol_rx_decap(vdev, peer, msdu, info);
@@ -109,7 +109,7 @@
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu) \
do { \
if (__tx_desc->orig_l2_hdr_bytes != 0) \
- cdf_nbuf_push_head(__msdu, \
+ qdf_nbuf_push_head(__msdu, \
__tx_desc->orig_l2_hdr_bytes); \
} while (0)
#else
diff --git a/core/dp/txrx/ol_txrx_flow_control.c b/core/dp/txrx/ol_txrx_flow_control.c
index 157c0d4..442115b 100644
--- a/core/dp/txrx/ol_txrx_flow_control.c
+++ b/core/dp/txrx/ol_txrx_flow_control.c
@@ -26,7 +26,7 @@
*/
/* OS abstraction libraries */
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <cdf_atomic.h> /* qdf_atomic_read, etc. */
#include <qdf_util.h> /* qdf_unlikely */
diff --git a/core/dp/txrx/ol_txrx_internal.h b/core/dp/txrx/ol_txrx_internal.h
index 51d6a94..52c2546 100644
--- a/core/dp/txrx/ol_txrx_internal.h
+++ b/core/dp/txrx/ol_txrx_internal.h
@@ -29,7 +29,7 @@
#define _OL_TXRX_INTERNAL__H_
#include <qdf_util.h> /* qdf_assert */
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_mem.h> /* qdf_mem_set */
#include <cds_ieee80211_common.h> /* ieee80211_frame */
#include <ol_htt_rx_api.h> /* htt_rx_msdu_desc_completes_mpdu, etc. */
@@ -160,7 +160,7 @@
if (!(head)) { \
(head) = (elem); \
} else { \
- cdf_nbuf_set_next((tail), (elem)); \
+ qdf_nbuf_set_next((tail), (elem)); \
} \
(tail) = (elem); \
} while (0)
@@ -168,10 +168,10 @@
static inline void
ol_rx_mpdu_list_next(struct ol_txrx_pdev_t *pdev,
void *mpdu_list,
- cdf_nbuf_t *mpdu_tail, cdf_nbuf_t *next_mpdu)
+ qdf_nbuf_t *mpdu_tail, qdf_nbuf_t *next_mpdu)
{
htt_pdev_handle htt_pdev = pdev->htt_pdev;
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
/*
* For now, we use a simply flat list of MSDUs.
@@ -181,12 +181,12 @@
msdu = mpdu_list;
while (!htt_rx_msdu_desc_completes_mpdu
(htt_pdev, htt_rx_msdu_desc_retrieve(htt_pdev, msdu))) {
- msdu = cdf_nbuf_next(msdu);
+ msdu = qdf_nbuf_next(msdu);
TXRX_ASSERT2(msdu);
}
/* msdu now points to the last MSDU within the first MPDU */
*mpdu_tail = msdu;
- *next_mpdu = cdf_nbuf_next(msdu);
+ *next_mpdu = qdf_nbuf_next(msdu);
}
/*--- txrx stats macros ---*/
@@ -203,7 +203,7 @@
#define TXRX_STATS_MSDU_INCR(pdev, field, netbuf) \
do { \
TXRX_STATS_INCR((pdev), pub.field.pkts); \
- TXRX_STATS_ADD((pdev), pub.field.bytes, cdf_nbuf_len(netbuf)); \
+ TXRX_STATS_ADD((pdev), pub.field.bytes, qdf_nbuf_len(netbuf)); \
} while (0)
/* conditional defs based on verbosity level */
@@ -211,10 +211,10 @@
#define TXRX_STATS_MSDU_LIST_INCR(pdev, field, netbuf_list) \
do { \
- cdf_nbuf_t tmp_list = netbuf_list; \
+ qdf_nbuf_t tmp_list = netbuf_list; \
while (tmp_list) { \
TXRX_STATS_MSDU_INCR(pdev, field, tmp_list); \
- tmp_list = cdf_nbuf_next(tmp_list); \
+ tmp_list = qdf_nbuf_next(tmp_list); \
} \
} while (0)
@@ -373,7 +373,7 @@
static inline void
ol_txrx_frms_dump(const char *name,
struct ol_txrx_pdev_t *pdev,
- cdf_nbuf_t frm,
+ qdf_nbuf_t frm,
enum ol_txrx_frm_dump_options display_options, int max_len)
{
#define TXRX_FRM_DUMP_MAX_LEN 128
@@ -385,7 +385,7 @@
name);
}
while (frm) {
- p = cdf_nbuf_data(frm);
+ p = qdf_nbuf_data(frm);
if (display_options & ol_txrx_frm_dump_tcp_seq) {
int tcp_offset;
int l2_hdr_size;
@@ -475,8 +475,8 @@
if (display_options & ol_txrx_frm_dump_contents) {
int i, frag_num, len_lim;
len_lim = max_len;
- if (len_lim > cdf_nbuf_len(frm))
- len_lim = cdf_nbuf_len(frm);
+ if (len_lim > qdf_nbuf_len(frm))
+ len_lim = qdf_nbuf_len(frm);
if (len_lim > TXRX_FRM_DUMP_MAX_LEN)
len_lim = TXRX_FRM_DUMP_MAX_LEN;
@@ -489,11 +489,11 @@
while (i < len_lim) {
int frag_bytes;
frag_bytes =
- cdf_nbuf_get_frag_len(frm, frag_num);
+ qdf_nbuf_get_frag_len(frm, frag_num);
if (frag_bytes > len_lim - i)
frag_bytes = len_lim - i;
if (frag_bytes > 0) {
- p = cdf_nbuf_get_frag_vaddr(frm,
+ p = qdf_nbuf_get_frag_vaddr(frm,
frag_num);
qdf_mem_copy(&local_buf[i], p,
frag_bytes);
@@ -504,7 +504,7 @@
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
"frame %p data (%p), hex dump of bytes 0-%d of %d:\n",
- frm, p, len_lim - 1, (int)cdf_nbuf_len(frm));
+ frm, p, len_lim - 1, (int)qdf_nbuf_len(frm));
p = local_buf;
while (len_lim > 16) {
QDF_TRACE(QDF_MODULE_ID_TXRX,
@@ -532,7 +532,7 @@
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
"\n");
}
- frm = cdf_nbuf_next(frm);
+ frm = qdf_nbuf_next(frm);
}
}
#else
@@ -571,7 +571,7 @@
if (ol_cfg_frame_type(pdev->ctrl_pdev) == \
wlan_frm_fmt_native_wifi) { \
/* For windows, it is always native wifi header .*/ \
- wh = (struct ieee80211_frame *)cdf_nbuf_data(rx_msdu); \
+ wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu); \
} \
ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev, \
wh, OL_RX_ERR_UNKNOWN_PEER); \
@@ -613,7 +613,7 @@
do { \
qdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \
peer->stats.tx_or_rx.frms.type += 1; \
- peer->stats.tx_or_rx.bytes.type += cdf_nbuf_len(msdu); \
+ peer->stats.tx_or_rx.bytes.type += qdf_nbuf_len(msdu); \
qdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex); \
} while (0)
#define OL_TXRX_PEER_STATS_UPDATE(peer, tx_or_rx, msdu) \
@@ -622,10 +622,10 @@
struct ol_txrx_pdev_t *pdev = vdev->pdev; \
uint8_t *dest_addr; \
if (pdev->frame_format == wlan_frm_fmt_802_3) { \
- dest_addr = cdf_nbuf_data(msdu); \
+ dest_addr = qdf_nbuf_data(msdu); \
} else { /* 802.11 format */ \
struct ieee80211_frame *frm; \
- frm = (struct ieee80211_frame *) cdf_nbuf_data(msdu); \
+ frm = (struct ieee80211_frame *) qdf_nbuf_data(msdu); \
if (vdev->opmode == wlan_op_mode_ap) { \
dest_addr = (uint8_t *) &(frm->i_addr1[0]); \
} else { \
diff --git a/core/dp/txrx/ol_txrx_types.h b/core/dp/txrx/ol_txrx_types.h
index 90ad86b..7d6ffc4 100644
--- a/core/dp/txrx/ol_txrx_types.h
+++ b/core/dp/txrx/ol_txrx_types.h
@@ -32,7 +32,7 @@
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_
-#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_mem.h>
#include <cds_queue.h> /* TAILQ */
#include <a_types.h> /* A_UINT8 */
@@ -127,7 +127,7 @@
};
struct ol_tx_desc_t {
- cdf_nbuf_t netbuf;
+ qdf_nbuf_t netbuf;
void *htt_tx_desc;
uint16_t id;
qdf_dma_addr_t htt_tx_desc_paddr;
@@ -536,7 +536,7 @@
/* rx proc function */
void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu_list);
+ unsigned tid, qdf_nbuf_t msdu_list);
/* tx data delivery notification callback function */
struct {
@@ -827,8 +827,8 @@
struct {
struct {
- cdf_nbuf_t head;
- cdf_nbuf_t tail;
+ qdf_nbuf_t head;
+ qdf_nbuf_t tail;
int depth;
} txq;
uint32_t paused_reason;
@@ -877,8 +877,8 @@
};
struct ol_rx_reorder_array_elem_t {
- cdf_nbuf_t head;
- cdf_nbuf_t tail;
+ qdf_nbuf_t head;
+ qdf_nbuf_t tail;
};
struct ol_rx_reorder_t {
@@ -967,7 +967,7 @@
*/
void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
struct ol_txrx_peer_t *peer,
- unsigned tid, cdf_nbuf_t msdu_list);
+ unsigned tid, qdf_nbuf_t msdu_list);
#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
ol_txrx_peer_stats_t stats;
diff --git a/core/dp/txrx/txrx.h b/core/dp/txrx/txrx.h
index 7d2c03c..fa24eaf 100644
--- a/core/dp/txrx/txrx.h
+++ b/core/dp/txrx/txrx.h
@@ -29,10 +29,9 @@
#define TXRX_H
#include "cds_api.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "csr_api.h"
#include "sap_api.h"
-#include "cdf_nbuf.h"
#include "ol_txrx_osif_api.h"
/* wait on peer deletion timeout value in milliseconds */
@@ -117,11 +116,11 @@
* @typedef ol_txrx_tx_fp
* @brief top-level transmit function
*/
-typedef cdf_nbuf_t
-(*ol_txrx_tx_fp)(struct ol_txrx_vdev_t *vdev, cdf_nbuf_t msdu_list);
+typedef qdf_nbuf_t
+(*ol_txrx_tx_fp)(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu_list);
typedef void
-(*ol_txrx_mgmt_tx_cb)(void *ctxt, cdf_nbuf_t tx_mgmt_frm, int had_error);
+(*ol_txrx_mgmt_tx_cb)(void *ctxt, qdf_nbuf_t tx_mgmt_frm, int had_error);
/* If RSSI realm is changed, send notification to Clients, SME, HDD */
typedef QDF_STATUS (*wlan_txrx_rssi_cross_thresh)(void *adapter, u8 rssi,
@@ -141,7 +140,7 @@
/* Rx callback registered with txrx */
-typedef int (*wlan_txrx_cb_type)(void *g_cdsctx, cdf_nbuf_t buf, u8 sta_id,
+typedef int (*wlan_txrx_cb_type)(void *g_cdsctx, qdf_nbuf_t buf, u8 sta_id,
struct txrx_rx_metainfo *rx_meta_info);
static inline int wlan_txrx_get_rssi(void *g_cdsctx, u8 sta_id, int8_t *rssi)
diff --git a/core/dp/txrx/wdi_event.h b/core/dp/txrx/wdi_event.h
index d1019ee..70eb795 100644
--- a/core/dp/txrx/wdi_event.h
+++ b/core/dp/txrx/wdi_event.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -29,7 +29,7 @@
#define _WDI_EVENT_H_
#include "athdefs.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#define WDI_EVENT_BASE 0x100 /* Event starting number */
enum WDI_EVENT {
@@ -45,7 +45,7 @@
};
struct wdi_event_rx_peer_invalid_msg {
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
struct ieee80211_frame *wh;
uint8_t vdev_id;
};
diff --git a/core/hdd/inc/wlan_hdd_ipa.h b/core/hdd/inc/wlan_hdd_ipa.h
index a7cabcc..a7e8d5f 100644
--- a/core/hdd/inc/wlan_hdd_ipa.h
+++ b/core/hdd/inc/wlan_hdd_ipa.h
@@ -45,8 +45,8 @@
* FIXME: Temporary hack - until IPA functionality gets restored
*
*/
-typedef void (*hdd_ipa_nbuf_cb_fn)(cdf_nbuf_t);
-void hdd_ipa_nbuf_cb(cdf_nbuf_t skb); /* Fwd declare */
+typedef void (*hdd_ipa_nbuf_cb_fn)(qdf_nbuf_t);
+void hdd_ipa_nbuf_cb(qdf_nbuf_t skb); /* Fwd declare */
static inline hdd_ipa_nbuf_cb_fn wlan_hdd_stub_ipa_fn(void)
{
return hdd_ipa_nbuf_cb;
@@ -54,7 +54,7 @@
QDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx);
QDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx);
-QDF_STATUS hdd_ipa_process_rxt(void *cds_context, cdf_nbuf_t rxBuf,
+QDF_STATUS hdd_ipa_process_rxt(void *cds_context, qdf_nbuf_t rxBuf,
uint8_t sta_id);
int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
enum ipa_wlan_event type, uint8_t *mac_addr);
@@ -86,7 +86,7 @@
}
static inline QDF_STATUS hdd_ipa_process_rxt(void *cds_context,
- cdf_nbuf_t rxBuf, uint8_t sta_id)
+ qdf_nbuf_t rxBuf, uint8_t sta_id)
{
return QDF_STATUS_SUCCESS;
}
diff --git a/core/hdd/inc/wlan_hdd_softap_tx_rx.h b/core/hdd/inc/wlan_hdd_softap_tx_rx.h
index eff0fb1..b5d879a 100644
--- a/core/hdd/inc/wlan_hdd_softap_tx_rx.h
+++ b/core/hdd/inc/wlan_hdd_softap_tx_rx.h
@@ -47,11 +47,11 @@
QDF_STATUS hdd_softap_deinit_tx_rx_sta(hdd_adapter_t *pAdapter,
uint8_t STAId);
QDF_STATUS hdd_softap_rx_packet_cbk(void *cds_context,
- cdf_nbuf_t rxBufChain,
+ qdf_nbuf_t rxBufChain,
uint8_t staId);
#ifdef IPA_OFFLOAD
QDF_STATUS hdd_softap_rx_mul_packet_cbk(void *cds_context,
- cdf_nbuf_t rx_buf_list, uint8_t staId);
+ qdf_nbuf_t rx_buf_list, uint8_t staId);
#endif /* IPA_OFFLOAD */
QDF_STATUS hdd_softap_deregister_sta(hdd_adapter_t *pAdapter,
diff --git a/core/hdd/inc/wlan_hdd_tx_rx.h b/core/hdd/inc/wlan_hdd_tx_rx.h
index 881a788..3bf0c92 100644
--- a/core/hdd/inc/wlan_hdd_tx_rx.h
+++ b/core/hdd/inc/wlan_hdd_tx_rx.h
@@ -57,12 +57,12 @@
void hdd_tx_timeout(struct net_device *dev);
QDF_STATUS hdd_init_tx_rx(hdd_adapter_t *pAdapter);
QDF_STATUS hdd_deinit_tx_rx(hdd_adapter_t *pAdapter);
-QDF_STATUS hdd_rx_packet_cbk(void *cds_context, cdf_nbuf_t rxBufChain,
+QDF_STATUS hdd_rx_packet_cbk(void *cds_context, qdf_nbuf_t rxBufChain,
uint8_t staId);
#ifdef IPA_OFFLOAD
QDF_STATUS hdd_rx_mul_packet_cbk(void *cds_context,
- cdf_nbuf_t rx_buf_list, uint8_t staId);
+ qdf_nbuf_t rx_buf_list, uint8_t staId);
#endif /* IPA_OFFLOAD */
QDF_STATUS hdd_ibss_get_sta_id(hdd_station_ctx_t *pHddStaCtx,
diff --git a/core/hdd/src/wlan_hdd_ipa.c b/core/hdd/src/wlan_hdd_ipa.c
index 4458819..8c6dc29 100644
--- a/core/hdd/src/wlan_hdd_ipa.c
+++ b/core/hdd/src/wlan_hdd_ipa.c
@@ -376,7 +376,7 @@
uint8_t num_iface;
enum hdd_ipa_rm_state rm_state;
/*
- * IPA driver can send RM notifications with IRQ disabled so using cdf
+ * IPA driver can send RM notifications with IRQ disabled so using qdf
* APIs as it is taken care gracefully. Without this, kernel would throw
* an warning if spin_lock_bh is used while IRQ is disabled
*/
@@ -390,7 +390,7 @@
enum ipa_client_type prod_client;
atomic_t tx_ref_cnt;
- cdf_nbuf_queue_t pm_queue_head;
+ qdf_nbuf_queue_t pm_queue_head;
struct work_struct pm_work;
qdf_spinlock_t pm_lock;
bool suspended;
@@ -2130,7 +2130,7 @@
qdf_spin_lock_bh(&hdd_ipa->pm_lock);
- if (!cdf_nbuf_is_queue_empty(&hdd_ipa->pm_queue_head)) {
+ if (!qdf_nbuf_is_queue_empty(&hdd_ipa->pm_queue_head)) {
qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
return -EAGAIN;
}
@@ -2508,7 +2508,7 @@
*
* Return: None
*/
-static void hdd_ipa_send_skb_to_network(cdf_nbuf_t skb,
+static void hdd_ipa_send_skb_to_network(qdf_nbuf_t skb,
hdd_adapter_t *adapter)
{
struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
@@ -2518,13 +2518,13 @@
HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO_LOW, "Invalid adapter: 0x%p",
adapter);
HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
- cdf_nbuf_free(skb);
+ qdf_nbuf_free(skb);
return;
}
if (cds_is_driver_unloading()) {
HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
- cdf_nbuf_free(skb);
+ qdf_nbuf_free(skb);
return;
}
@@ -2559,11 +2559,11 @@
{
struct hdd_ipa_priv *hdd_ipa = NULL;
hdd_adapter_t *adapter = NULL;
- cdf_nbuf_t skb;
+ qdf_nbuf_t skb;
uint8_t iface_id;
uint8_t session_id;
struct hdd_ipa_iface_context *iface_context;
- cdf_nbuf_t copy;
+ qdf_nbuf_t copy;
uint8_t fw_desc;
int ret;
@@ -2571,7 +2571,7 @@
switch (evt) {
case IPA_RECEIVE:
- skb = (cdf_nbuf_t) data;
+ skb = (qdf_nbuf_t) data;
if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
session_id = (uint8_t)skb->cb[0];
iface_id = vdev_to_iface[session_id];
@@ -2589,7 +2589,7 @@
HDD_IPA_DBG_DUMP(QDF_TRACE_LEVEL_INFO_HIGH,
"w2i -- skb", skb->data, 8);
HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
- cdf_nbuf_free(skb);
+ qdf_nbuf_free(skb);
return;
}
@@ -2628,7 +2628,7 @@
QDF_TRACE_LEVEL_DEBUG,
"Forward packet to Tx (fw_desc=%d)",
fw_desc);
- copy = cdf_nbuf_copy(skb);
+ copy = qdf_nbuf_copy(skb);
if (copy) {
hdd_ipa->ipa_tx_forward++;
ret = hdd_softap_hard_start_xmit(
@@ -2649,7 +2649,7 @@
if (fw_desc & HDD_IPA_FW_RX_DESC_DISCARD_M) {
HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
hdd_ipa->ipa_rx_discard++;
- cdf_nbuf_free(skb);
+ qdf_nbuf_free(skb);
break;
}
@@ -2674,13 +2674,15 @@
*
* Return: None
*/
-void hdd_ipa_nbuf_cb(cdf_nbuf_t skb)
+void hdd_ipa_nbuf_cb(qdf_nbuf_t skb)
{
struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "%p", wlan_hdd_stub_priv_to_addr(NBUF_CB_TX_IPA_PRIV(skb)));
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "%p",
+ wlan_hdd_stub_priv_to_addr(QDF_NBUF_CB_TX_IPA_PRIV(skb)));
/* FIXME: This is broken; PRIV_DATA is now 31 bits */
- ipa_free_skb((struct ipa_rx_data *)wlan_hdd_stub_priv_to_addr(NBUF_CB_TX_IPA_PRIV(skb)));
+ ipa_free_skb((struct ipa_rx_data *)
+ wlan_hdd_stub_priv_to_addr(QDF_NBUF_CB_TX_IPA_PRIV(skb)));
hdd_ipa->stats.num_tx_comp_cnt++;
@@ -2703,7 +2705,7 @@
struct hdd_ipa_priv *hdd_ipa = iface_context->hdd_ipa;
uint8_t interface_id;
hdd_adapter_t *adapter = NULL;
- cdf_nbuf_t skb;
+ qdf_nbuf_t skb;
qdf_spin_lock_bh(&iface_context->interface_lock);
adapter = iface_context->adapter;
@@ -2736,21 +2738,21 @@
skb = ipa_tx_desc->skb;
qdf_mem_set(skb->cb, sizeof(skb->cb), 0);
- cdf_nbuf_ipa_owned_set(skb);
+ qdf_nbuf_ipa_owned_set(skb);
/* FIXME: This is broken. No such field in cb any more:
NBUF_CALLBACK_FN(skb) = hdd_ipa_nbuf_cb; */
if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
- cdf_nbuf_mapped_paddr_set(skb,
+ qdf_nbuf_mapped_paddr_set(skb,
ipa_tx_desc->dma_addr
+ HDD_IPA_WLAN_FRAG_HEADER
+ HDD_IPA_WLAN_IPA_HEADER);
ipa_tx_desc->skb->len -=
HDD_IPA_WLAN_FRAG_HEADER + HDD_IPA_WLAN_IPA_HEADER;
} else
- cdf_nbuf_mapped_paddr_set(skb, ipa_tx_desc->dma_addr);
+ qdf_nbuf_mapped_paddr_set(skb, ipa_tx_desc->dma_addr);
/* FIXME: This is broken: priv_data is 31 bits */
- cdf_nbuf_ipa_priv_set(skb, wlan_hdd_stub_addr_to_priv(ipa_tx_desc));
+ qdf_nbuf_ipa_priv_set(skb, wlan_hdd_stub_addr_to_priv(ipa_tx_desc));
adapter->stats.tx_bytes += ipa_tx_desc->skb->len;
@@ -2785,12 +2787,13 @@
struct hdd_ipa_priv,
pm_work);
struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
- cdf_nbuf_t skb;
+ qdf_nbuf_t skb;
uint32_t dequeued = 0;
qdf_spin_lock_bh(&hdd_ipa->pm_lock);
- while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
+ while (((skb = qdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head))
+ != NULL)) {
qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
@@ -2825,13 +2828,13 @@
struct hdd_ipa_priv *hdd_ipa = NULL;
struct ipa_rx_data *ipa_tx_desc;
struct hdd_ipa_iface_context *iface_context;
- cdf_nbuf_t skb;
+ qdf_nbuf_t skb;
struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
QDF_STATUS status = QDF_STATUS_SUCCESS;
iface_context = (struct hdd_ipa_iface_context *)priv;
if (evt != IPA_RECEIVE) {
- skb = (cdf_nbuf_t) data;
+ skb = (qdf_nbuf_t) data;
dev_kfree_skb_any(skb);
iface_context->stats.num_tx_drop++;
return;
@@ -2879,7 +2882,7 @@
pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
pm_tx_cb->iface_context = iface_context;
pm_tx_cb->ipa_tx_desc = ipa_tx_desc;
- cdf_nbuf_queue_add(&hdd_ipa->pm_queue_head, skb);
+ qdf_nbuf_queue_add(&hdd_ipa->pm_queue_head, skb);
hdd_ipa->stats.num_tx_queued++;
qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
@@ -4066,7 +4069,7 @@
INIT_WORK(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
#endif
qdf_spinlock_create(&hdd_ipa->pm_lock);
- cdf_nbuf_queue_init(&hdd_ipa->pm_queue_head);
+ qdf_nbuf_queue_init(&hdd_ipa->pm_queue_head);
ret = hdd_ipa_setup_rm(hdd_ipa);
if (ret)
@@ -4140,7 +4143,7 @@
struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
int i;
struct hdd_ipa_iface_context *iface_context = NULL;
- cdf_nbuf_t skb;
+ qdf_nbuf_t skb;
struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
if (!hdd_ipa_is_enabled(hdd_ctx))
@@ -4163,7 +4166,8 @@
qdf_spin_lock_bh(&hdd_ipa->pm_lock);
- while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
+ while (((skb = qdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head))
+ != NULL)) {
qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
diff --git a/core/hdd/src/wlan_hdd_softap_tx_rx.c b/core/hdd/src/wlan_hdd_softap_tx_rx.c
index a39a5dc..b20c086 100644
--- a/core/hdd/src/wlan_hdd_softap_tx_rx.c
+++ b/core/hdd/src/wlan_hdd_softap_tx_rx.c
@@ -258,7 +258,7 @@
++pAdapter->hdd_stats.hddTxRxStats.txXmitClassifiedAC[ac];
#if defined (IPA_OFFLOAD)
- if (!cdf_nbuf_ipa_owned_get(skb)) {
+ if (!qdf_nbuf_ipa_owned_get(skb)) {
#endif
/* Check if the buffer has enough header room */
skb = skb_unshare(skb, GFP_ATOMIC);
@@ -299,18 +299,18 @@
/* Zero out skb's context buffer for the driver to use */
qdf_mem_set(skb->cb, sizeof(skb->cb), 0);
- NBUF_CB_TX_PACKET_TRACK(skb) = NBUF_TX_PKT_DATA_TRACK;
- NBUF_UPDATE_TX_PKT_COUNT(skb, NBUF_TX_PKT_HDD);
+ QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);
qdf_dp_trace_set_track(skb);
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_PACKET_PTR_RECORD,
(uint8_t *)skb->data, sizeof(skb->data)));
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_PACKET_RECORD,
- (uint8_t *)skb->data, cdf_nbuf_len(skb)));
- if (cdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
+ (uint8_t *)skb->data, qdf_nbuf_len(skb)));
+ if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_PACKET_RECORD,
(uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (cdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE)));
+ (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE)));
if (ol_tx_send_data_frame(STAId, skb,
proto_type) != NULL) {
@@ -327,11 +327,11 @@
drop_pkt:
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
- (uint8_t *)skb->data, cdf_nbuf_len(skb)));
- if (cdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
+ (uint8_t *)skb->data, qdf_nbuf_len(skb)));
+ if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
(uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (cdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE)));
+ (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE)));
kfree_skb(skb);
drop_pkt_accounting:
@@ -490,7 +490,7 @@
/**
* hdd_softap_rx_packet_cbk() - Receive packet handler
* @cds_context: pointer to CDS context
- * @rxBuf: pointer to rx cdf_nbuf
+ * @rxBuf: pointer to rx qdf_nbuf
* @staId: Station Id
*
* Receive callback registered with TL. TL will call this to notify
@@ -501,7 +501,7 @@
* QDF_STATUS_SUCCESS otherwise
*/
QDF_STATUS hdd_softap_rx_packet_cbk(void *cds_context,
- cdf_nbuf_t rxBuf, uint8_t staId)
+ qdf_nbuf_t rxBuf, uint8_t staId)
{
hdd_adapter_t *pAdapter = NULL;
int rxstat;
@@ -578,7 +578,7 @@
/* Remove SKB from internal tracking table before submitting
* it to stack
*/
- cdf_net_buf_debug_release_skb(rxBuf);
+ qdf_net_buf_debug_release_skb(rxBuf);
if (hdd_napi_enabled(HDD_NAPI_ANY) && !pHddCtx->config->enableRxThread)
rxstat = netif_receive_skb(skb);
diff --git a/core/hdd/src/wlan_hdd_tx_rx.c b/core/hdd/src/wlan_hdd_tx_rx.c
index d176b81..a288c4e 100644
--- a/core/hdd/src/wlan_hdd_tx_rx.c
+++ b/core/hdd/src/wlan_hdd_tx_rx.c
@@ -368,7 +368,7 @@
/* Get TL AC corresponding to Qdisc queue index/AC. */
ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];
- if (!cdf_nbuf_ipa_owned_get(skb)) {
+ if (!qdf_nbuf_ipa_owned_get(skb)) {
/* Check if the buffer has enough header room */
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb)
@@ -476,20 +476,20 @@
/* Zero out skb's context buffer for the driver to use */
qdf_mem_set(skb->cb, sizeof(skb->cb), 0);
- NBUF_CB_TX_PACKET_TRACK(skb) = NBUF_TX_PKT_DATA_TRACK;
- NBUF_UPDATE_TX_PKT_COUNT(skb, NBUF_TX_PKT_HDD);
+ QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
+ QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);
qdf_dp_trace_set_track(skb);
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_PACKET_PTR_RECORD,
(uint8_t *)skb->data, sizeof(skb->data)));
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_PACKET_RECORD,
- (uint8_t *)skb->data, cdf_nbuf_len(skb)));
- if (cdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
+ (uint8_t *)skb->data, qdf_nbuf_len(skb)));
+ if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_PACKET_RECORD,
(uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (cdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE)));
+ (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE)));
- if (ol_tx_send_data_frame(STAId, (cdf_nbuf_t) skb,
+ if (ol_tx_send_data_frame(STAId, (qdf_nbuf_t) skb,
proto_type) != NULL) {
QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_WARN,
"%s: Failed to send packet to txrx for staid:%d",
@@ -503,11 +503,11 @@
drop_pkt:
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
- (uint8_t *)skb->data, cdf_nbuf_len(skb)));
- if (cdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
+ (uint8_t *)skb->data, qdf_nbuf_len(skb)));
+ if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
(uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (cdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE)));
+ (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE)));
++pAdapter->stats.tx_dropped;
++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped;
@@ -642,7 +642,7 @@
/**
* hdd_rx_packet_cbk() - Receive packet handler
* @cds_context: pointer to CDS context
- * @rxBuf: pointer to rx cdf_nbuf
+ * @rxBuf: pointer to rx qdf_nbuf
* @staId: Station Id
*
* Receive callback registered with TL. TL will call this to notify
@@ -652,7 +652,7 @@
* Return: QDF_STATUS_E_FAILURE if any errors encountered,
* QDF_STATUS_SUCCESS otherwise
*/
-QDF_STATUS hdd_rx_packet_cbk(void *cds_context, cdf_nbuf_t rxBuf, uint8_t staId)
+QDF_STATUS hdd_rx_packet_cbk(void *cds_context, qdf_nbuf_t rxBuf, uint8_t staId)
{
hdd_adapter_t *pAdapter = NULL;
hdd_context_t *pHddCtx = NULL;
@@ -706,7 +706,7 @@
/* Remove SKB from internal tracking table before submitting
* it to stack
*/
- cdf_nbuf_free(skb);
+ qdf_nbuf_free(skb);
return QDF_STATUS_SUCCESS;
}
@@ -740,7 +740,7 @@
/* Remove SKB from internal tracking table before submitting
* it to stack
*/
- cdf_net_buf_debug_release_skb(rxBuf);
+ qdf_net_buf_debug_release_skb(rxBuf);
if (HDD_LRO_NO_RX ==
hdd_lro_rx(pHddCtx, pAdapter, skb)) {
diff --git a/core/mac/src/sys/common/src/wlan_qct_sys.c b/core/mac/src/sys/common/src/wlan_qct_sys.c
index aab767f..6d83790 100644
--- a/core/mac/src/sys/common/src/wlan_qct_sys.c
+++ b/core/mac/src/sys/common/src/wlan_qct_sys.c
@@ -35,6 +35,7 @@
#include "wma_types.h"
#include "sme_api.h"
#include "mac_init_api.h"
+#include "qdf_trace.h"
/*
* Cookie for SYS messages. Note that anyone posting a SYS Message
@@ -72,6 +73,7 @@
*
* Return: none
*/
+#ifdef QDF_ENABLE_TRACING
void sys_stop_complete_cb(void *pUserData)
{
qdf_event_t *pStopEvt = (qdf_event_t *) pUserData;
@@ -80,6 +82,12 @@
QDF_ASSERT(QDF_IS_STATUS_SUCCESS(qdf_status));
}
+#else
+void sys_stop_complete_cb(void *pUserData)
+{
+ return;
+}
+#endif
/**
* sys_stop() - To post stop message to system module
diff --git a/core/sap/dfs/inc/dfs.h b/core/sap/dfs/inc/dfs.h
index a2f9ae2..ffcafff 100644
--- a/core/sap/dfs/inc/dfs.h
+++ b/core/sap/dfs/inc/dfs.h
@@ -49,7 +49,7 @@
/*DFS New Include Start*/
#include <qdf_net_types.h> /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
-#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
#include <qdf_util.h> /* qdf_assert */
#include <qdf_lock.h> /* cdf_spinlock */
#include <cds_queue.h> /* TAILQ */
diff --git a/core/utils/epping/inc/epping_internal.h b/core/utils/epping/inc/epping_internal.h
index 0c980de..2439c57 100644
--- a/core/utils/epping/inc/epping_internal.h
+++ b/core/utils/epping/inc/epping_internal.h
@@ -96,7 +96,7 @@
struct task_struct *pid;
void *arg;
bool done;
- cdf_nbuf_t skb;
+ qdf_nbuf_t skb;
HTC_ENDPOINT_ID eid;
struct semaphore sem;
bool inited;
@@ -142,7 +142,7 @@
uint8_t sessionId;
/* for mboxping */
qdf_spinlock_t data_lock;
- cdf_nbuf_queue_t nodrop_queue;
+ qdf_nbuf_queue_t nodrop_queue;
qdf_timer_t epping_timer;
epping_tx_timer_state_t epping_timer_state;
bool registered;
@@ -168,7 +168,7 @@
/* epping_tx signatures */
void epping_tx_timer_expire(epping_adapter_t *pAdapter);
void epping_tx_complete_multiple(void *ctx, HTC_PACKET_QUEUE *pPacketQueue);
-int epping_tx_send(cdf_nbuf_t skb, epping_adapter_t *pAdapter);
+int epping_tx_send(qdf_nbuf_t skb, epping_adapter_t *pAdapter);
#ifdef HIF_SDIO
HTC_SEND_FULL_ACTION epping_tx_queue_full(void *Context, HTC_PACKET *pPacket);
@@ -192,6 +192,6 @@
void epping_unregister_tx_copier(HTC_ENDPOINT_ID eid,
epping_context_t *pEpping_ctx);
void epping_tx_copier_schedule(epping_context_t *pEpping_ctx,
- HTC_ENDPOINT_ID eid, cdf_nbuf_t skb);
+ HTC_ENDPOINT_ID eid, qdf_nbuf_t skb);
#endif /* HIF_PCI */
#endif /* end #ifndef EPPING_INTERNAL_H */
diff --git a/core/utils/epping/src/epping_rx.c b/core/utils/epping/src/epping_rx.c
index ea2d323..2718e64 100644
--- a/core/utils/epping/src/epping_rx.c
+++ b/core/utils/epping/src/epping_rx.c
@@ -83,7 +83,7 @@
__func__, buffersToRefill, Endpoint);
for (RxBuffers = 0; RxBuffers < buffersToRefill; RxBuffers++) {
- osBuf = cdf_nbuf_alloc(NULL, AR6000_BUFFER_SIZE,
+ osBuf = qdf_nbuf_alloc(NULL, AR6000_BUFFER_SIZE,
AR6000_MIN_HEAD_ROOM, 4, false);
if (NULL == osBuf) {
break;
@@ -93,7 +93,7 @@
pPacket = (HTC_PACKET *) (A_NETBUF_HEAD(osBuf));
/* set re-fill info */
SET_HTC_PACKET_INFO_RX_REFILL(pPacket, osBuf,
- cdf_nbuf_data(osBuf),
+ qdf_nbuf_data(osBuf),
AR6000_BUFFER_SIZE, Endpoint);
SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, osBuf);
/* add to queue */
@@ -123,9 +123,9 @@
if (status != A_OK) {
if (status != A_ECANCELED) {
- printk("%s: RX ERR (%d) \n", __func__, status);
+ printk("%s: RX ERR (%d)\n", __func__, status);
}
- cdf_nbuf_free(pktSkb);
+ qdf_nbuf_free(pktSkb);
return;
}
@@ -135,7 +135,7 @@
A_NETBUF_PULL(pktSkb, EPPING_ALIGNMENT_PAD);
}
if (enb_rx_dump)
- epping_hex_dump((void *)cdf_nbuf_data(pktSkb),
+ epping_hex_dump((void *)qdf_nbuf_data(pktSkb),
pktSkb->len, __func__);
pktSkb->dev = dev;
if ((pktSkb->dev->flags & IFF_UP) == IFF_UP) {
@@ -155,7 +155,7 @@
}
} else {
++pAdapter->stats.rx_dropped;
- cdf_nbuf_free(pktSkb);
+ qdf_nbuf_free(pktSkb);
}
}
}
diff --git a/core/utils/epping/src/epping_tx.c b/core/utils/epping/src/epping_tx.c
index 814f443..ffcf8df 100644
--- a/core/utils/epping/src/epping_tx.c
+++ b/core/utils/epping/src/epping_tx.c
@@ -56,11 +56,11 @@
static bool enb_tx_dump;
void epping_tx_dup_pkt(epping_adapter_t *pAdapter,
- HTC_ENDPOINT_ID eid, cdf_nbuf_t skb)
+ HTC_ENDPOINT_ID eid, qdf_nbuf_t skb)
{
struct epping_cookie *cookie = NULL;
int skb_len, ret;
- cdf_nbuf_t new_skb;
+ qdf_nbuf_t new_skb;
cookie = epping_alloc_cookie(pAdapter->pEpping_ctx);
if (cookie == NULL) {
@@ -69,25 +69,25 @@
__func__);
return;
}
- new_skb = cdf_nbuf_copy(skb);
+ new_skb = qdf_nbuf_copy(skb);
if (!new_skb) {
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
- "%s: cdf_nbuf_copy returns no resource\n", __func__);
+ "%s: qdf_nbuf_copy returns no resource\n", __func__);
epping_free_cookie(pAdapter->pEpping_ctx, cookie);
return;
}
SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
- cookie, cdf_nbuf_data(skb),
- cdf_nbuf_len(new_skb), eid, 0);
+ cookie, qdf_nbuf_data(skb),
+ qdf_nbuf_len(new_skb), eid, 0);
SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, new_skb);
- skb_len = (int)cdf_nbuf_len(new_skb);
+ skb_len = (int)qdf_nbuf_len(new_skb);
/* send the packet */
ret = htc_send_pkt(pAdapter->pEpping_ctx->HTCHandle, &cookie->HtcPkt);
if (ret != A_OK) {
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: htc_send_pkt failed, ret = %d\n", __func__, ret);
epping_free_cookie(pAdapter->pEpping_ctx, cookie);
- cdf_nbuf_free(new_skb);
+ qdf_nbuf_free(new_skb);
return;
}
pAdapter->stats.tx_bytes += skb_len;
@@ -99,9 +99,9 @@
}
}
-static int epping_tx_send_int(cdf_nbuf_t skb, epping_adapter_t *pAdapter)
+static int epping_tx_send_int(qdf_nbuf_t skb, epping_adapter_t *pAdapter)
{
- EPPING_HEADER *eppingHdr = (EPPING_HEADER *) cdf_nbuf_data(skb);
+ EPPING_HEADER *eppingHdr = (EPPING_HEADER *) qdf_nbuf_data(skb);
HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED;
struct epping_cookie *cookie = NULL;
A_UINT8 ac = 0;
@@ -143,7 +143,7 @@
epping_set_kperf_flag(pAdapter, eid, tmpHdr.CmdBuffer_t[0]);
}
SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
- cookie, cdf_nbuf_data(skb), cdf_nbuf_len(skb),
+ cookie, qdf_nbuf_data(skb), qdf_nbuf_len(skb),
eid, 0);
SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, skb);
skb_len = skb->len;
@@ -170,25 +170,25 @@
void epping_tx_timer_expire(epping_adapter_t *pAdapter)
{
- cdf_nbuf_t nodrop_skb;
+ qdf_nbuf_t nodrop_skb;
EPPING_LOG(QDF_TRACE_LEVEL_INFO, "%s: queue len: %d\n", __func__,
- cdf_nbuf_queue_len(&pAdapter->nodrop_queue));
+ qdf_nbuf_queue_len(&pAdapter->nodrop_queue));
- if (!cdf_nbuf_queue_len(&pAdapter->nodrop_queue)) {
+ if (!qdf_nbuf_queue_len(&pAdapter->nodrop_queue)) {
/* nodrop queue is empty so no need to arm timer */
pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
return;
}
/* try to flush nodrop queue */
- while ((nodrop_skb = cdf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
+ while ((nodrop_skb = qdf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
if (epping_tx_send_int(nodrop_skb, pAdapter)) {
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: nodrop: %p xmit fail in timer\n",
__func__, nodrop_skb);
/* fail to xmit so put the nodrop packet to the nodrop queue */
- cdf_nbuf_queue_insert_head(&pAdapter->nodrop_queue,
+ qdf_nbuf_queue_insert_head(&pAdapter->nodrop_queue,
nodrop_skb);
break;
} else {
@@ -213,19 +213,19 @@
}
}
-int epping_tx_send(cdf_nbuf_t skb, epping_adapter_t *pAdapter)
+int epping_tx_send(qdf_nbuf_t skb, epping_adapter_t *pAdapter)
{
- cdf_nbuf_t nodrop_skb;
+ qdf_nbuf_t nodrop_skb;
EPPING_HEADER *eppingHdr;
A_UINT8 ac = 0;
- eppingHdr = (EPPING_HEADER *) cdf_nbuf_data(skb);
+ eppingHdr = (EPPING_HEADER *) qdf_nbuf_data(skb);
if (!IS_EPPING_PACKET(eppingHdr)) {
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: Recived non endpoint ping packets\n", __func__);
/* no packet to send, cleanup */
- cdf_nbuf_free(skb);
+ qdf_nbuf_free(skb);
return -ENOMEM;
}
@@ -236,7 +236,7 @@
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: ac %d is not mapped to mboxping service\n",
__func__, ac);
- cdf_nbuf_free(skb);
+ qdf_nbuf_free(skb);
return -ENOMEM;
}
@@ -252,13 +252,13 @@
*/
/* check the nodrop queue first */
- while ((nodrop_skb = cdf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
+ while ((nodrop_skb = qdf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
if (epping_tx_send_int(nodrop_skb, pAdapter)) {
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: nodrop: %p xmit fail\n", __func__,
nodrop_skb);
/* fail to xmit so put the nodrop packet to the nodrop queue */
- cdf_nbuf_queue_insert_head(&pAdapter->nodrop_queue,
+ qdf_nbuf_queue_insert_head(&pAdapter->nodrop_queue,
nodrop_skb);
/* no cookie so free the current skb */
goto tx_fail;
@@ -278,7 +278,7 @@
tx_fail:
if (!IS_EPING_PACKET_NO_DROP(eppingHdr)) {
/* allow to drop the skb so drop it */
- cdf_nbuf_free(skb);
+ qdf_nbuf_free(skb);
++pAdapter->stats.tx_dropped;
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: Tx skb %p dropped, stats.tx_dropped = %ld\n",
@@ -287,7 +287,7 @@
} else {
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: nodrop: %p queued\n", __func__, skb);
- cdf_nbuf_queue_add(&pAdapter->nodrop_queue, skb);
+ qdf_nbuf_queue_add(&pAdapter->nodrop_queue, skb);
qdf_spin_lock_bh(&pAdapter->data_lock);
if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
@@ -317,13 +317,13 @@
struct net_device *dev = pAdapter->dev;
A_STATUS status;
HTC_ENDPOINT_ID eid;
- cdf_nbuf_t pktSkb;
+ qdf_nbuf_t pktSkb;
struct epping_cookie *cookie;
A_BOOL flushing = false;
- cdf_nbuf_queue_t skb_queue;
+ qdf_nbuf_queue_t skb_queue;
HTC_PACKET *htc_pkt;
- cdf_nbuf_queue_init(&skb_queue);
+ qdf_nbuf_queue_init(&skb_queue);
qdf_spin_lock_bh(&pAdapter->data_lock);
@@ -337,13 +337,13 @@
cookie = htc_pkt->pPktContext;
ASSERT(pktSkb);
- ASSERT(htc_pkt->pBuffer == cdf_nbuf_data(pktSkb));
+ ASSERT(htc_pkt->pBuffer == qdf_nbuf_data(pktSkb));
/* add this to the list, use faster non-lock API */
- cdf_nbuf_queue_add(&skb_queue, pktSkb);
+ qdf_nbuf_queue_add(&skb_queue, pktSkb);
if (A_SUCCESS(status)) {
- ASSERT(htc_pkt->ActualLength == cdf_nbuf_len(pktSkb));
+ ASSERT(htc_pkt->ActualLength == qdf_nbuf_len(pktSkb));
}
EPPING_LOG(QDF_TRACE_LEVEL_INFO,
"%s skb=%p data=%p len=0x%x eid=%d ",
@@ -370,12 +370,12 @@
qdf_spin_unlock_bh(&pAdapter->data_lock);
/* free all skbs in our local list */
- while (cdf_nbuf_queue_len(&skb_queue)) {
+ while (qdf_nbuf_queue_len(&skb_queue)) {
/* use non-lock version */
- pktSkb = cdf_nbuf_queue_remove(&skb_queue);
+ pktSkb = qdf_nbuf_queue_remove(&skb_queue);
if (pktSkb == NULL)
break;
- cdf_nbuf_free(pktSkb);
+ qdf_nbuf_free(pktSkb);
pEpping_ctx->total_tx_acks++;
}
diff --git a/core/utils/epping/src/epping_txrx.c b/core/utils/epping/src/epping_txrx.c
index 3d41d0f..dc7e2fe 100644
--- a/core/utils/epping/src/epping_txrx.c
+++ b/core/utils/epping/src/epping_txrx.c
@@ -312,11 +312,11 @@
qdf_timer_free(&pAdapter->epping_timer);
pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
- while (cdf_nbuf_queue_len(&pAdapter->nodrop_queue)) {
- cdf_nbuf_t tmp_nbuf = NULL;
- tmp_nbuf = cdf_nbuf_queue_remove(&pAdapter->nodrop_queue);
+ while (qdf_nbuf_queue_len(&pAdapter->nodrop_queue)) {
+ qdf_nbuf_t tmp_nbuf = NULL;
+ tmp_nbuf = qdf_nbuf_queue_remove(&pAdapter->nodrop_queue);
if (tmp_nbuf)
- cdf_nbuf_free(tmp_nbuf);
+ qdf_nbuf_free(tmp_nbuf);
}
free_netdev(dev);
@@ -368,7 +368,7 @@
qdf_mem_copy(pAdapter->macAddressCurrent.bytes,
macAddr, sizeof(tSirMacAddr));
qdf_spinlock_create(&pAdapter->data_lock);
- cdf_nbuf_queue_init(&pAdapter->nodrop_queue);
+ qdf_nbuf_queue_init(&pAdapter->nodrop_queue);
pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
qdf_timer_init(epping_get_cdf_ctx(), &pAdapter->epping_timer,
epping_timer_expire, dev, QDF_TIMER_TYPE_SW);
diff --git a/core/utils/fwlog/dbglog_host.c b/core/utils/fwlog/dbglog_host.c
index b37e839..977d051 100644
--- a/core/utils/fwlog/dbglog_host.c
+++ b/core/utils/fwlog/dbglog_host.c
@@ -1392,7 +1392,7 @@
len, WMI_DBGLOG_CFG_CMDID);
if (status != A_OK)
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return status;
}
diff --git a/core/utils/pktlog/include/pktlog_ac_i.h b/core/utils/pktlog/include/pktlog_ac_i.h
index 07bbac0..7dbf443 100644
--- a/core/utils/pktlog/include/pktlog_ac_i.h
+++ b/core/utils/pktlog/include/pktlog_ac_i.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -59,7 +59,7 @@
A_STATUS process_tx_info(struct ol_txrx_pdev_t *pdev, void *data);
A_STATUS process_rx_info(void *pdev, void *data);
-A_STATUS process_rx_info_remote(void *pdev, cdf_nbuf_t amsdu);
+A_STATUS process_rx_info_remote(void *pdev, qdf_nbuf_t amsdu);
A_STATUS process_rate_find(void *pdev, void *data);
A_STATUS process_rate_update(void *pdev, void *data);
diff --git a/core/utils/pktlog/pktlog_internal.c b/core/utils/pktlog/pktlog_internal.c
index 010585c..0d219b6 100644
--- a/core/utils/pktlog/pktlog_internal.c
+++ b/core/utils/pktlog/pktlog_internal.c
@@ -406,7 +406,7 @@
msdu_id_offset);
uint8_t *addr, *vap_addr;
uint8_t vdev_id;
- cdf_nbuf_t netbuf;
+ qdf_nbuf_t netbuf;
uint32_t len;
qdf_mem_set(&pl_msdu_info, sizeof(pl_msdu_info), 0);
@@ -436,7 +436,7 @@
htt_tx_desc = (uint32_t *) tx_desc->htt_tx_desc;
qdf_assert(htt_tx_desc);
- cdf_nbuf_peek_header(netbuf, &addr, &len);
+ qdf_nbuf_peek_header(netbuf, &addr, &len);
if (len < (2 * IEEE80211_ADDR_LEN)) {
qdf_print("TX frame does not have a valid"
@@ -482,7 +482,7 @@
return A_OK;
}
-A_STATUS process_rx_info_remote(void *pdev, cdf_nbuf_t amsdu)
+A_STATUS process_rx_info_remote(void *pdev, qdf_nbuf_t amsdu)
{
struct ol_pktlog_dev_t *pl_dev;
struct ath_pktlog_info *pl_info;
@@ -490,7 +490,7 @@
struct ath_pktlog_hdr pl_hdr;
struct ath_pktlog_rx_info rxstat_log;
size_t log_size;
- cdf_nbuf_t msdu;
+ qdf_nbuf_t msdu;
if (!pdev) {
printk("Invalid pdev in %s\n", __func__);
@@ -506,7 +506,7 @@
while (msdu) {
rx_desc =
- (struct htt_host_rx_desc_base *)(cdf_nbuf_data(msdu)) - 1;
+ (struct htt_host_rx_desc_base *)(qdf_nbuf_data(msdu)) - 1;
log_size =
sizeof(*rx_desc) - sizeof(struct htt_host_fw_desc_base);
@@ -530,7 +530,7 @@
log_size, &pl_hdr);
qdf_mem_copy(rxstat_log.rx_desc, (void *)rx_desc +
sizeof(struct htt_host_fw_desc_base), pl_hdr.size);
- msdu = cdf_nbuf_next(msdu);
+ msdu = qdf_nbuf_next(msdu);
}
return A_OK;
}
diff --git a/core/wma/inc/wma.h b/core/wma/inc/wma.h
index 21d9153..9c4e1a1 100644
--- a/core/wma/inc/wma.h
+++ b/core/wma/inc/wma.h
@@ -509,7 +509,7 @@
* @lock: lock
*/
struct beacon_info {
- cdf_nbuf_t buf;
+ qdf_nbuf_t buf;
uint32_t len;
uint8_t dma_mapped;
uint32_t tim_ie_offset;
@@ -991,7 +991,7 @@
struct wmi_desc_t {
pWMATxRxCompFunc tx_cmpl_cb;
pWMAAckFnTxComp ota_post_proc_cb;
- cdf_nbuf_t nbuf;
+ qdf_nbuf_t nbuf;
uint32_t desc_id;
};
@@ -1216,7 +1216,7 @@
pWMAAckFnTxComp umac_ota_ack_cb[SIR_MAC_MGMT_RESERVED15];
pWMAAckFnTxComp umac_data_ota_ack_cb;
unsigned long last_umac_data_ota_timestamp;
- cdf_nbuf_t last_umac_data_nbuf;
+ qdf_nbuf_t last_umac_data_nbuf;
bool needShutdown;
uint32_t num_mem_chunks;
struct wma_mem_chunk mem_chunks[MAX_MEM_CHUNKS];
diff --git a/core/wma/inc/wma_internal.h b/core/wma/inc/wma_internal.h
index c822ffc..1e563b0 100644
--- a/core/wma/inc/wma_internal.h
+++ b/core/wma/inc/wma_internal.h
@@ -156,7 +156,7 @@
void *body_ptr, uint32_t body_val);
void wma_data_tx_ack_comp_hdlr(void *wma_context,
- cdf_nbuf_t netbuf, int32_t status);
+ qdf_nbuf_t netbuf, int32_t status);
QDF_STATUS wma_set_ppsconfig(uint8_t vdev_id, uint16_t pps_param,
int value);
diff --git a/core/wma/src/wma_data.c b/core/wma/src/wma_data.c
index 941ce36..dbfce92 100644
--- a/core/wma/src/wma_data.c
+++ b/core/wma/src/wma_data.c
@@ -45,7 +45,7 @@
#include "ol_txrx_ctrl_api.h"
#include "wlan_tgt_def_config.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "ol_txrx_api.h"
#include "qdf_mem.h"
@@ -846,7 +846,7 @@
* Return: none
*/
void
-wma_data_tx_ack_comp_hdlr(void *wma_context, cdf_nbuf_t netbuf, int32_t status)
+wma_data_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
{
ol_txrx_pdev_handle pdev;
tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
@@ -897,8 +897,8 @@
free_nbuf:
/* unmap and freeing the tx buf as txrx is not taking care */
- cdf_nbuf_unmap_single(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
- cdf_nbuf_free(netbuf);
+ qdf_nbuf_unmap_single(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_free(netbuf);
}
/**
@@ -1026,7 +1026,7 @@
if (ret) {
WMA_LOGP("%s: Failed to send enable/disable MCC"
" adaptive scheduler command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
return QDF_STATUS_SUCCESS;
}
@@ -1134,7 +1134,7 @@
if (ret) {
WMA_LOGE("%s: Failed to send MCC Channel Time Latency command",
__func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
QDF_ASSERT(0);
return QDF_STATUS_E_FAILURE;
}
@@ -1266,7 +1266,7 @@
WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID);
if (ret) {
WMA_LOGE("Failed to send MCC Channel Time Quota command");
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
QDF_ASSERT(0);
return QDF_STATUS_E_FAILURE;
}
@@ -1540,9 +1540,9 @@
* Return: none
*/
static void
-wma_mgmt_tx_ack_comp_hdlr(void *wma_context, cdf_nbuf_t netbuf, int32_t status)
+wma_mgmt_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
{
- tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (cdf_nbuf_data(netbuf));
+ tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(netbuf));
tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
if (wma_handle && wma_handle->umac_ota_ack_cb[pFc->subType]) {
@@ -1583,7 +1583,7 @@
* Return: none
*/
static void
-wma_mgmt_tx_dload_comp_hldr(void *wma_context, cdf_nbuf_t netbuf,
+wma_mgmt_tx_dload_comp_hldr(void *wma_context, qdf_nbuf_t netbuf,
int32_t status)
{
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
@@ -2004,7 +2004,7 @@
status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
WMI_THERMAL_MGMT_CMDID);
if (status) {
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
WMA_LOGE("%s:Failed to send thermal mgmt command", __func__);
return QDF_STATUS_E_FAILURE;
}
@@ -2141,7 +2141,7 @@
*
* Return: none
*/
-static void wma_decap_to_8023(cdf_nbuf_t msdu, struct wma_decap_info_t *info)
+static void wma_decap_to_8023(qdf_nbuf_t msdu, struct wma_decap_info_t *info)
{
struct llc_snap_hdr_t *llc_hdr;
uint16_t ether_type;
@@ -2151,7 +2151,7 @@
uint8_t *buf;
struct ethernet_hdr_t *ethr_hdr;
- buf = (uint8_t *) cdf_nbuf_data(msdu);
+ buf = (uint8_t *) qdf_nbuf_data(msdu);
llc_hdr = (struct llc_snap_hdr_t *)buf;
ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
/* do llc remove if needed */
@@ -2171,9 +2171,9 @@
}
}
if (l2_hdr_space > ETHERNET_HDR_LEN) {
- buf = cdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
+ buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
} else if (l2_hdr_space < ETHERNET_HDR_LEN) {
- buf = cdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
+ buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
}
/* mpdu hdr should be present in info,re-create ethr_hdr based on mpdu hdr */
@@ -2211,9 +2211,9 @@
ethr_hdr->ethertype[1] = (ether_type) & 0xff;
} else {
uint32_t pktlen =
- cdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype);
+ qdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype);
ether_type = (uint16_t) pktlen;
- ether_type = cdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
+ ether_type = qdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
ethr_hdr->ethertype[1] = (ether_type) & 0xff;
}
@@ -2393,8 +2393,8 @@
sizeof(uint32_t)));
bufp += WMI_TLV_HDR_SIZE;
qdf_mem_copy(bufp, pData, bufp_len);
- cdf_nbuf_map_single(qdf_ctx, tx_frame, QDF_DMA_TO_DEVICE);
- dma_addr = cdf_nbuf_get_frag_paddr(tx_frame, 0);
+ qdf_nbuf_map_single(qdf_ctx, tx_frame, QDF_DMA_TO_DEVICE);
+ dma_addr = qdf_nbuf_get_frag_paddr(tx_frame, 0);
cmd->paddr_lo = (uint32_t)(dma_addr & 0xffffffff);
#if defined(HELIUMPLUS_PADDR64)
cmd->paddr_hi = (uint32_t)((dma_addr >> 32) & 0x1F);
@@ -2447,7 +2447,7 @@
int32_t is_high_latency;
ol_txrx_vdev_handle txrx_vdev;
enum frame_index tx_frm_index = GENERIC_NODOWNLD_NOACK_COMP_INDEX;
- tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (cdf_nbuf_data(tx_frame));
+ tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame));
uint8_t use_6mbps = 0;
uint8_t downld_comp_required = 0;
uint16_t chanfreq;
@@ -2495,7 +2495,7 @@
WMA_LOGE("No Support to send other frames except 802.11 Mgmt/Data");
return QDF_STATUS_E_FAILURE;
}
- mHdr = (tpSirMacMgmtHdr)cdf_nbuf_data(tx_frame);
+ mHdr = (tpSirMacMgmtHdr)qdf_nbuf_data(tx_frame);
#ifdef WLAN_FEATURE_11W
if ((iface && iface->rmfEnabled) &&
(frmType == TXRX_FRM_802_11_MGMT) &&
@@ -2503,7 +2503,7 @@
pFc->subType == SIR_MAC_MGMT_DEAUTH ||
pFc->subType == SIR_MAC_MGMT_ACTION)) {
struct ieee80211_frame *wh =
- (struct ieee80211_frame *)cdf_nbuf_data(tx_frame);
+ (struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
if (!IEEE80211_IS_BROADCAST(wh->i_addr1) &&
!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
if (pFc->wep) {
@@ -2586,7 +2586,7 @@
(pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) {
uint64_t adjusted_tsf_le;
struct ieee80211_frame *wh =
- (struct ieee80211_frame *)cdf_nbuf_data(tx_frame);
+ (struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
/* Make the TSF offset negative to match TSF in beacons */
adjusted_tsf_le = cpu_to_le64(0ULL -
@@ -2595,14 +2595,14 @@
A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
}
if (frmType == TXRX_FRM_802_11_DATA) {
- cdf_nbuf_t ret;
- cdf_nbuf_t skb = (cdf_nbuf_t) tx_frame;
+ qdf_nbuf_t ret;
+ qdf_nbuf_t skb = (qdf_nbuf_t) tx_frame;
ol_txrx_pdev_handle pdev =
cds_get_context(QDF_MODULE_ID_TXRX);
struct wma_decap_info_t decap_info;
struct ieee80211_frame *wh =
- (struct ieee80211_frame *)cdf_nbuf_data(skb);
+ (struct ieee80211_frame *)qdf_nbuf_data(skb);
unsigned long curr_timestamp = qdf_mc_timer_get_system_ticks();
if (pdev == NULL) {
@@ -2645,7 +2645,7 @@
/* Take out 802.11 header from skb */
decap_info.hdr_len = wma_ieee80211_hdrsize(wh);
qdf_mem_copy(decap_info.hdr, wh, decap_info.hdr_len);
- cdf_nbuf_pull_head(skb, decap_info.hdr_len);
+ qdf_nbuf_pull_head(skb, decap_info.hdr_len);
/* Decapsulate to 802.3 format */
wma_decap_to_8023(skb, &decap_info);
@@ -2654,7 +2654,7 @@
qdf_mem_set(skb->cb, sizeof(skb->cb), 0);
/* Do the DMA Mapping */
- cdf_nbuf_map_single(pdev->osdev, skb, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_map_single(pdev->osdev, skb, QDF_DMA_TO_DEVICE);
/* Terminate the (single-element) list of tx frames */
skb->next = NULL;
@@ -2671,7 +2671,7 @@
if (ret) {
WMA_LOGE("TxRx Rejected. Fail to do Tx");
- cdf_nbuf_unmap_single(pdev->osdev, skb,
+ qdf_nbuf_unmap_single(pdev->osdev, skb,
QDF_DMA_TO_DEVICE);
/* Call Download Cb so that umac can free the buffer */
if (tx_frm_download_comp_cb)
@@ -2718,7 +2718,7 @@
NBUF_PKT_TRAC_TYPE_MGMT_ACTION);
if (proto_type & NBUF_PKT_TRAC_TYPE_MGMT_ACTION)
cds_pkt_trace_buf_update("WM:T:MACT");
- cdf_nbuf_trace_set_proto_type(tx_frame, proto_type);
+ qdf_nbuf_trace_set_proto_type(tx_frame, proto_type);
#endif /* QCA_PKT_PROTO_TRACE */
} else {
if (downld_comp_required)
@@ -2893,7 +2893,7 @@
*/
void ol_rx_err(ol_pdev_handle pdev, uint8_t vdev_id,
uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
- enum ol_rx_err_type err_type, cdf_nbuf_t rx_frame,
+ enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
uint64_t *pn, uint8_t key_id)
{
tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
@@ -2909,9 +2909,9 @@
if (err_type != OL_RX_ERR_TKIP_MIC)
return;
- if (cdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
+ if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
return;
- eth_hdr = (struct ether_header *)cdf_nbuf_data(rx_frame);
+ eth_hdr = (struct ether_header *)qdf_nbuf_data(rx_frame);
mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
if (!mic_err_ind) {
WMA_LOGE("%s: Failed to allocate memory for MIC indication message",
@@ -3069,7 +3069,7 @@
status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf,
sizeof(*cmd), WMI_LRO_CONFIG_CMDID);
if (status) {
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
WMA_LOGE("%s:Failed to send WMI_LRO_CONFIG_CMDID", __func__);
return QDF_STATUS_E_FAILURE;
}
diff --git a/core/wma/src/wma_dev_if.c b/core/wma/src/wma_dev_if.c
index 3dafbdd..e75d085 100644
--- a/core/wma/src/wma_dev_if.c
+++ b/core/wma/src/wma_dev_if.c
@@ -45,7 +45,7 @@
#include "ol_txrx_ctrl_api.h"
#include "wlan_tgt_def_config.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "ol_txrx_api.h"
#include "qdf_mem.h"
@@ -752,7 +752,7 @@
if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_DELETE_CMDID)) {
WMA_LOGP("%s: Failed to send peer delete command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
WMA_LOGD("%s: peer_addr %pM vdev_id %d", __func__, peer_addr, vdev_id);
@@ -801,7 +801,7 @@
goto send_fail_resp;
}
qdf_mem_zero(bcn, sizeof(*bcn));
- bcn->buf = cdf_nbuf_alloc(NULL, WMA_BCN_BUF_MAX_SIZE, 0,
+ bcn->buf = qdf_nbuf_alloc(NULL, WMA_BCN_BUF_MAX_SIZE, 0,
sizeof(uint32_t), 0);
if (!bcn->buf) {
WMA_LOGE("%s: No memory allocated for beacon buffer",
@@ -1125,7 +1125,7 @@
if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_FLUSH_TIDS_CMDID)) {
WMA_LOGP("%s: Failed to send flush tid command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
WMA_LOGD("%s: peer_addr %pM vdev_id %d", __func__, peer_addr, vdev_id);
@@ -1267,7 +1267,7 @@
if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_CREATE_CMDID)) {
WMA_LOGP("%s: failed to send WMI_PEER_CREATE_CMDID", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
WMA_LOGD("%s: peer_addr %pM vdev_id %d", __func__, peer_addr, vdev_id);
@@ -1369,7 +1369,7 @@
cmd->vdev_id = vdev_id;
if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_DOWN_CMDID)) {
WMA_LOGP("%s: Failed to send vdev down", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
WMA_LOGE("%s: vdev_id %d", __func__, vdev_id);
@@ -1651,7 +1651,7 @@
WMA_LOGE("%s: Failed to send vdev restart command", __func__);
qdf_atomic_set(&intr[sessionId].vdev_restart_params.
hidden_ssid_restart_in_progress, 0);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
}
@@ -1779,9 +1779,9 @@
WMA_LOGD("%s: Freeing beacon struct %p, "
"template memory %p", __func__, bcn, bcn->buf);
if (bcn->dma_mapped)
- cdf_nbuf_unmap_single(pdev->osdev, bcn->buf,
+ qdf_nbuf_unmap_single(pdev->osdev, bcn->buf,
QDF_DMA_TO_DEVICE);
- cdf_nbuf_free(bcn->buf);
+ qdf_nbuf_free(bcn->buf);
qdf_mem_free(bcn);
wma->interfaces[resp_event->vdev_id].beacon = NULL;
}
@@ -2211,7 +2211,7 @@
" Failed to send VDEV START command",
__func__, __LINE__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return QDF_STATUS_E_FAILURE;
}
@@ -2326,7 +2326,7 @@
if (ret < 0) {
WMA_LOGP("%s: Failed to send vdev start command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return QDF_STATUS_E_FAILURE;
}
@@ -2787,9 +2787,9 @@
WMA_LOGD("%s: Freeing beacon struct %p, "
"template memory %p", __func__, bcn, bcn->buf);
if (bcn->dma_mapped)
- cdf_nbuf_unmap_single(pdev->osdev, bcn->buf,
+ qdf_nbuf_unmap_single(pdev->osdev, bcn->buf,
QDF_DMA_TO_DEVICE);
- cdf_nbuf_free(bcn->buf);
+ qdf_nbuf_free(bcn->buf);
qdf_mem_free(bcn);
wma->interfaces[tgt_req->vdev_id].beacon = NULL;
}
@@ -3758,7 +3758,7 @@
WMI_CHAR_ARRAY_TO_MAC_ADDR(bssid, &cmd->vdev_bssid);
if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_UP_CMDID)) {
WMA_LOGP("%s: Failed to send vdev up command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
return 0;
@@ -4617,7 +4617,7 @@
cmd->vdev_id = vdev_id;
if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_STOP_CMDID)) {
WMA_LOGP("%s: Failed to send vdev stop command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
return 0;
diff --git a/core/wma/src/wma_features.c b/core/wma/src/wma_features.c
index ea82fa9..9fa87a0 100644
--- a/core/wma/src/wma_features.c
+++ b/core/wma/src/wma_features.c
@@ -46,7 +46,7 @@
#include "ol_txrx_ctrl_api.h"
#include "wlan_tgt_def_config.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "ol_txrx_api.h"
#include "qdf_mem.h"
@@ -971,7 +971,7 @@
if (wmi_unified_cmd_send(wma_handle->wmi_handle, wmi_buf, len,
WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID)) {
WMA_LOGE("%s: failed to send link speed command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -1131,7 +1131,7 @@
if (wmi_unified_cmd_send(wmi_handle, buf, len,
WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID)) {
WMA_LOGE("Set Green AP PS param Failed val %d", value);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
return 0;
@@ -1179,7 +1179,7 @@
if (ret) {
WMA_LOGE("PROFILE_TRIGGER cmd Failed with value %d",
value1);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return ret;
}
break;
@@ -1203,7 +1203,7 @@
if (ret) {
WMA_LOGE("PROFILE_DATA cmd Failed for id %d value %d",
value1, value2);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return ret;
}
break;
@@ -1229,7 +1229,7 @@
if (ret) {
WMA_LOGE("HIST_INTVL cmd Failed for id %d value %d",
value1, value2);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return ret;
}
break;
@@ -1256,7 +1256,7 @@
if (ret) {
WMA_LOGE("enable cmd Failed for id %d value %d",
value1, value2);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return ret;
}
break;
@@ -1937,7 +1937,7 @@
if (ret != EOK) {
WMA_LOGE(FL(":wmi cmd send failed"));
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
out:
@@ -4460,7 +4460,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, buf, len,
WMI_VDEV_WMM_DELTS_CMDID)) {
WMA_LOGP("%s: Failed to send vdev DELTS command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
if (msg->setRICparams == true)
@@ -4529,7 +4529,7 @@
__func__);
pAggrQosRspMsg->status[i] =
QDF_STATUS_E_FAILURE;
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
}
}
@@ -4592,7 +4592,7 @@
WMI_VDEV_WMM_ADDTS_CMDID)) {
WMA_LOGP("%s: Failed to send vdev ADDTS command", __func__);
msg->status = QDF_STATUS_E_FAILURE;
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
if (msg->setRICparams == true)
@@ -5486,7 +5486,7 @@
&vdev_id)) {
WMA_LOGE("%s: Failed to find vdev id for %pM", __func__,
pAddPeriodicTxPtrnParams->mac_address.bytes);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_INVAL;
}
buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf);
@@ -5518,7 +5518,7 @@
WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID)) {
WMA_LOGE("%s: failed to add pattern set state command",
__func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -5557,7 +5557,7 @@
&vdev_id)) {
WMA_LOGE("%s: Failed to find vdev id for %pM", __func__,
pDelPeriodicTxPtrnParams->mac_address.bytes);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_INVAL;
}
cmd = (WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *)
@@ -5576,7 +5576,7 @@
if (wmi_unified_cmd_send(wma_handle->wmi_handle, wmi_buf, len,
WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID)) {
WMA_LOGE("%s: failed to send del pattern command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -6316,7 +6316,7 @@
WMI_PDEV_SET_REGDOMAIN_CMDID)) {
WMA_LOGP("%s: Failed to send pdev set regdomain command",
__func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
if ((((reg_dmn & ~COUNTRY_ERD_FLAG) == CTRY_JAPAN) ||
@@ -6577,7 +6577,7 @@
ret = wmi_unified_cmd_send(wma_handle->wmi_handle, wmibuf, len,
WMI_PDEV_SUSPEND_CMDID);
if (ret < 0) {
- cdf_nbuf_free(wmibuf);
+ qdf_nbuf_free(wmibuf);
return ret;
}
@@ -6913,7 +6913,7 @@
if (wmi_unified_cmd_send(wma_handle->wmi_handle, wmi_buf, len,
WMI_TDLS_SET_OFFCHAN_MODE_CMDID)) {
WMA_LOGP(FL("failed to send tdls off chan command"));
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
ret = -EIO;
}
@@ -7021,7 +7021,7 @@
if (wmi_unified_cmd_send(wma_handle->wmi_handle, wmi_buf, len,
WMI_TDLS_SET_STATE_CMDID)) {
WMA_LOGP("%s: failed to send tdls set state command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
ret = -EIO;
goto end_fw_tdls_state;
}
@@ -7223,7 +7223,7 @@
WMI_TDLS_PEER_UPDATE_CMDID)) {
WMA_LOGE("%s: failed to send tdls peer update state command",
__func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
ret = -EIO;
goto end_tdls_peer_state;
}
diff --git a/core/wma/src/wma_main.c b/core/wma/src/wma_main.c
index 777b027..5c76e7d 100644
--- a/core/wma/src/wma_main.c
+++ b/core/wma/src/wma_main.c
@@ -47,7 +47,7 @@
#include "ol_txrx_ctrl_api.h"
#include "wlan_tgt_def_config.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "ol_txrx_api.h"
#include "qdf_mem.h"
@@ -1492,7 +1492,7 @@
params_buf = qdf_mem_malloc(sizeof(wma_process_fw_event_params));
if (!params_buf) {
WMA_LOGE("%s: Failed alloc memory for params_buf", __func__);
- cdf_nbuf_free(evt_buf);
+ qdf_nbuf_free(evt_buf);
return -ENOMEM;
}
@@ -1507,7 +1507,7 @@
cds_mq_post_message(CDS_MQ_ID_WMA, &cds_msg)) {
WMA_LOGP("%s: Failed to post WMA_PROCESS_FW_EVENT msg",
__func__);
- cdf_nbuf_free(evt_buf);
+ qdf_nbuf_free(evt_buf);
qdf_mem_free(params_buf);
return -EFAULT;
}
@@ -3013,9 +3013,9 @@
if (bcn) {
if (bcn->dma_mapped)
- cdf_nbuf_unmap_single(wma_handle->qdf_dev,
+ qdf_nbuf_unmap_single(wma_handle->qdf_dev,
bcn->buf, QDF_DMA_TO_DEVICE);
- cdf_nbuf_free(bcn->buf);
+ qdf_nbuf_free(bcn->buf);
qdf_mem_free(bcn);
wma_handle->interfaces[i].beacon = NULL;
}
@@ -3609,7 +3609,7 @@
/* allocate memory requested by FW */
if (ev->num_mem_reqs > WMI_MAX_MEM_REQS) {
QDF_ASSERT(0);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return NULL;
}
@@ -5487,7 +5487,7 @@
if (wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
WMI_SOC_SET_PCL_CMDID)) {
WMA_LOGE("%s: Failed to send WMI_SOC_SET_PCL_CMDID", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -5549,7 +5549,7 @@
WMI_SOC_SET_HW_MODE_CMDID)) {
WMA_LOGE("%s: Failed to send WMI_SOC_SET_HW_MODE_CMDID",
__func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
goto fail;
}
return QDF_STATUS_SUCCESS;
@@ -5620,7 +5620,7 @@
WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID)) {
WMA_LOGE("%s: Failed to send WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID",
__func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
return QDF_STATUS_SUCCESS;
}
diff --git a/core/wma/src/wma_mgmt.c b/core/wma/src/wma_mgmt.c
index 2573e67..a710e05 100644
--- a/core/wma/src/wma_mgmt.c
+++ b/core/wma/src/wma_mgmt.c
@@ -46,7 +46,7 @@
#include "ol_txrx_ctrl_api.h"
#include "wlan_tgt_def_config.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "ol_txrx_api.h"
#include "qdf_mem.h"
@@ -111,7 +111,7 @@
qdf_spin_lock_bh(&bcn->lock);
- bcn_payload = cdf_nbuf_data(bcn->buf);
+ bcn_payload = qdf_nbuf_data(bcn->buf);
tim_ie = (struct beacon_tim_ie *)(&bcn_payload[bcn->tim_ie_offset]);
@@ -200,12 +200,12 @@
}
if (bcn->dma_mapped) {
- cdf_nbuf_unmap_single(pdev->osdev, bcn->buf, QDF_DMA_TO_DEVICE);
+ qdf_nbuf_unmap_single(pdev->osdev, bcn->buf, QDF_DMA_TO_DEVICE);
bcn->dma_mapped = 0;
}
- ret = cdf_nbuf_map_single(pdev->osdev, bcn->buf, QDF_DMA_TO_DEVICE);
+ ret = qdf_nbuf_map_single(pdev->osdev, bcn->buf, QDF_DMA_TO_DEVICE);
if (ret != QDF_STATUS_SUCCESS) {
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
WMA_LOGE("%s: failed map beacon buf to DMA region", __func__);
qdf_spin_unlock_bh(&bcn->lock);
return;
@@ -220,7 +220,7 @@
cmd->vdev_id = vdev_id;
cmd->data_len = bcn->len;
cmd->frame_ctrl = *((A_UINT16 *) wh->i_fc);
- cmd->frag_ptr = cdf_nbuf_get_frag_paddr(bcn->buf, 0);
+ cmd->frag_ptr = qdf_nbuf_get_frag_paddr(bcn->buf, 0);
/* notify Firmware of DTM and mcast/bcast traffic */
if (tim_ie->dtim_count == 0) {
@@ -653,7 +653,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, buf, len,
WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID)) {
WMA_LOGE(FL("Failed to offload STA SA Query"));
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
WMA_LOGD(FL("Exit :"));
@@ -723,7 +723,7 @@
WMA_LOGE("%s: received null pointer, hostv4addr:%p "
"destv4addr:%p destmac:%p ", __func__,
hostv4addr, destv4addr, destmac);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return;
}
cmd->method = WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE;
@@ -739,7 +739,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, buf, len,
WMI_STA_KEEPALIVE_CMDID)) {
WMA_LOGE("Failed to set KeepAlive");
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
WMA_LOGD("%s: Exit", __func__);
@@ -1069,7 +1069,7 @@
WMA_LOGE
("Set WMI_VDEV_PARAM_DROP_UNENCRY Param status:%d\n",
ret);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return ret;
}
}
@@ -1164,7 +1164,7 @@
if (ret != EOK) {
WMA_LOGP("%s: Failed to send peer assoc command ret = %d",
__func__, ret);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
return ret;
}
@@ -1519,7 +1519,7 @@
/* TODO: MFP ? */
WMA_LOGE("%s:Invalid encryption type:%d", __func__,
key_params->key_type);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return NULL;
}
@@ -1680,7 +1680,7 @@
status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
WMI_VDEV_INSTALL_KEY_CMDID);
if (status) {
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
WMA_LOGE("%s:Failed to send install key command",
__func__);
key_info->status = QDF_STATUS_E_FAILURE;
@@ -1872,7 +1872,7 @@
status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
WMI_VDEV_INSTALL_KEY_CMDID);
if (status) {
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
WMA_LOGE("%s:Failed to send install key command",
__func__);
}
@@ -1991,7 +1991,7 @@
status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
WMI_VDEV_INSTALL_KEY_CMDID);
if (status) {
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
WMA_LOGE("%s:Failed to send install key command",
__func__);
key_info->status = QDF_STATUS_E_FAILURE;
@@ -2327,8 +2327,8 @@
* this will be send to target on the reception of SWBA
* event from target.
*/
- cdf_nbuf_trim_tail(bcn->buf, cdf_nbuf_len(bcn->buf));
- memcpy(cdf_nbuf_data(bcn->buf),
+ qdf_nbuf_trim_tail(bcn->buf, qdf_nbuf_len(bcn->buf));
+ memcpy(qdf_nbuf_data(bcn->buf),
bcn_info->beacon + 4 /* Exclude beacon length field */,
len);
if (bcn_info->timIeOffset > 3) {
@@ -2342,7 +2342,7 @@
} else {
bcn->p2p_ie_offset = bcn_info->p2pIeOffset;
}
- bcn_payload = cdf_nbuf_data(bcn->buf);
+ bcn_payload = qdf_nbuf_data(bcn->buf);
if (bcn->tim_ie_offset) {
tim_ie =
(struct beacon_tim_ie *)(&bcn_payload[bcn->tim_ie_offset]);
@@ -2355,7 +2355,7 @@
tim_ie->tim_bitctl = 0;
}
- cdf_nbuf_put_tail(bcn->buf, len);
+ qdf_nbuf_put_tail(bcn->buf, len);
bcn->len = len;
qdf_spin_unlock_bh(&bcn->lock);
@@ -2421,7 +2421,7 @@
qdf_spin_lock_bh(&bcn->lock);
qdf_mem_zero(&bcn_info, sizeof(bcn_info));
- bcn_info.beacon = cdf_nbuf_data(bcn->buf);
+ bcn_info.beacon = qdf_nbuf_data(bcn->buf);
bcn_info.p2pIeOffset = bcn->p2p_ie_offset;
bcn_info.beaconLength = bcn->len;
bcn_info.timIeOffset = bcn->tim_ie_offset;
@@ -2701,7 +2701,7 @@
}
if (wmi_desc->nbuf)
- cdf_nbuf_unmap_single(pdev->osdev, wmi_desc->nbuf,
+ qdf_nbuf_unmap_single(pdev->osdev, wmi_desc->nbuf,
QDF_DMA_TO_DEVICE);
if (wmi_desc->tx_cmpl_cb)
wmi_desc->tx_cmpl_cb(wma_handle->mac_context,
@@ -3012,13 +3012,13 @@
int wma_process_bip(tp_wma_handle wma_handle,
struct wma_txrx_node *iface,
struct ieee80211_frame *wh,
- cdf_nbuf_t wbuf
+ qdf_nbuf_t wbuf
)
{
uint16_t key_id;
uint8_t *efrm;
- efrm = cdf_nbuf_data(wbuf) + cdf_nbuf_len(wbuf);
+ efrm = qdf_nbuf_data(wbuf) + qdf_nbuf_len(wbuf);
key_id = (uint16_t)*(efrm - cds_get_mmie_size() + 2);
if (!((key_id == WMA_IGTK_KEY_INDEX_4)
@@ -3032,14 +3032,14 @@
* if 11w offload is enabled then mmie validation is performed
* in firmware, host just need to trim the mmie.
*/
- cdf_nbuf_trim_tail(wbuf, cds_get_mmie_size());
+ qdf_nbuf_trim_tail(wbuf, cds_get_mmie_size());
} else {
if (cds_is_mmie_valid(iface->key.key,
iface->key.key_id[key_id - WMA_IGTK_KEY_INDEX_4].ipn,
(uint8_t *) wh, efrm)) {
WMA_LOGE(FL("Protected BC/MC frame MMIE validation successful"));
/* Remove MMIE */
- cdf_nbuf_trim_tail(wbuf, cds_get_mmie_size());
+ qdf_nbuf_trim_tail(wbuf, cds_get_mmie_size());
} else {
WMA_LOGE(FL("BC/MC MIC error or MMIE not present, dropping the frame"));
return -EINVAL;
@@ -3063,7 +3063,7 @@
struct wma_txrx_node *iface,
struct ieee80211_frame *wh,
cds_pkt_t *rx_pkt,
- cdf_nbuf_t wbuf)
+ qdf_nbuf_t wbuf)
{
uint8_t *orig_hdr;
uint8_t *ccmp;
@@ -3076,7 +3076,7 @@
return -EINVAL;
}
- orig_hdr = (uint8_t *) cdf_nbuf_data(wbuf);
+ orig_hdr = (uint8_t *) qdf_nbuf_data(wbuf);
/* Pointer to head of CCMP header */
ccmp = orig_hdr + sizeof(*wh);
if (wma_is_ccmp_pn_replay_attack(
@@ -3092,13 +3092,13 @@
qdf_mem_move(orig_hdr +
IEEE80211_CCMP_HEADERLEN, wh,
sizeof(*wh));
- cdf_nbuf_pull_head(wbuf,
+ qdf_nbuf_pull_head(wbuf,
IEEE80211_CCMP_HEADERLEN);
- cdf_nbuf_trim_tail(wbuf, IEEE80211_CCMP_MICLEN);
+ qdf_nbuf_trim_tail(wbuf, IEEE80211_CCMP_MICLEN);
rx_pkt->pkt_meta.mpdu_hdr_ptr =
- cdf_nbuf_data(wbuf);
- rx_pkt->pkt_meta.mpdu_len = cdf_nbuf_len(wbuf);
+ qdf_nbuf_data(wbuf);
+ rx_pkt->pkt_meta.mpdu_len = qdf_nbuf_len(wbuf);
rx_pkt->pkt_meta.mpdu_data_len =
rx_pkt->pkt_meta.mpdu_len -
rx_pkt->pkt_meta.mpdu_hdr_len;
@@ -3143,7 +3143,7 @@
struct wma_txrx_node *iface = NULL;
uint8_t vdev_id = WMA_INVALID_VDEV_ID;
cds_pkt_t *rx_pkt;
- cdf_nbuf_t wbuf;
+ qdf_nbuf_t wbuf;
struct ieee80211_frame *wh;
uint8_t mgt_type, mgt_subtype;
int status;
@@ -3217,7 +3217,7 @@
rx_pkt->pkt_meta.roamCandidateInd = 0;
/* Why not just use rx_event->hdr.buf_len? */
- wbuf = cdf_nbuf_alloc(NULL, roundup(hdr->buf_len, 4), 0, 4, false);
+ wbuf = qdf_nbuf_alloc(NULL, roundup(hdr->buf_len, 4), 0, 4, false);
if (!wbuf) {
WMA_LOGE("%s: Failed to allocate wbuf for mgmt rx len(%u)",
__func__, hdr->buf_len);
@@ -3225,11 +3225,11 @@
return -ENOMEM;
}
- cdf_nbuf_put_tail(wbuf, hdr->buf_len);
- cdf_nbuf_set_protocol(wbuf, ETH_P_CONTROL);
- wh = (struct ieee80211_frame *)cdf_nbuf_data(wbuf);
+ qdf_nbuf_put_tail(wbuf, hdr->buf_len);
+ qdf_nbuf_set_protocol(wbuf, ETH_P_CONTROL);
+ wh = (struct ieee80211_frame *)qdf_nbuf_data(wbuf);
- rx_pkt->pkt_meta.mpdu_hdr_ptr = cdf_nbuf_data(wbuf);
+ rx_pkt->pkt_meta.mpdu_hdr_ptr = qdf_nbuf_data(wbuf);
rx_pkt->pkt_meta.mpdu_data_ptr = rx_pkt->pkt_meta.mpdu_hdr_ptr +
rx_pkt->pkt_meta.mpdu_hdr_len;
rx_pkt->pkt_meta.tsf_delta = hdr->tsf_delta;
diff --git a/core/wma/src/wma_power.c b/core/wma/src/wma_power.c
index 52276cb..4330b4d 100644
--- a/core/wma/src/wma_power.c
+++ b/core/wma/src/wma_power.c
@@ -45,7 +45,7 @@
#include "ol_txrx_ctrl_api.h"
#include "wlan_tgt_def_config.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "ol_txrx_api.h"
#include "qdf_mem.h"
@@ -154,7 +154,7 @@
WMI_STA_POWERSAVE_PARAM_CMDID)) {
WMA_LOGE("Set Sta Ps param Failed vdevId %d Param %d val %d",
vdev_id, param, value);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
/* Store the PS Status */
@@ -578,7 +578,7 @@
WMI_STA_POWERSAVE_MODE_CMDID)) {
WMA_LOGE("Set Sta Mode Ps Failed vdevId %d val %d",
vdev_id, val);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return -EIO;
}
return 0;
@@ -1526,7 +1526,7 @@
} else { /* NoA is not present in previous beacon */
WMA_LOGD("%s: NoA not present in previous beacon, add it"
"bcn->len %u", __func__, bcn->len);
- buf = cdf_nbuf_data(bcn->buf);
+ buf = qdf_nbuf_data(bcn->buf);
bcn->noa_ie = buf + bcn->len;
}
diff --git a/core/wma/src/wma_scan_roam.c b/core/wma/src/wma_scan_roam.c
index 138d4ea..8b270fd 100644
--- a/core/wma/src/wma_scan_roam.c
+++ b/core/wma/src/wma_scan_roam.c
@@ -46,7 +46,7 @@
#include "ol_txrx_ctrl_api.h"
#include "wlan_tgt_def_config.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "ol_txrx_api.h"
#include "qdf_mem.h"
@@ -724,7 +724,7 @@
return QDF_STATUS_SUCCESS;
error:
if (buf)
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
error1:
return qdf_status;
}
@@ -3076,7 +3076,7 @@
__func__);
if (is_add_ts)
((tAddTsParams *) msg)->status = QDF_STATUS_E_FAILURE;
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
}
}
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */
@@ -3135,7 +3135,7 @@
if (wmi_unified_cmd_send(wma_handle->wmi_handle, wmi_buf, len,
WMI_UNIT_TEST_CMDID)) {
WMA_LOGP("%s: failed to send unit test command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return;
}
return;
@@ -3215,7 +3215,7 @@
WMI_ROAM_SYNCH_COMPLETE)) {
WMA_LOGP("%s: failed to send roam synch confirmation",
__func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return;
}
return;
@@ -5405,7 +5405,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, buf,
len, WMI_EXTSCAN_START_CMDID)) {
WMA_LOGE("%s: failed to send command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return QDF_STATUS_E_FAILURE;
}
wma->interfaces[pstart->sessionId].extscan_in_progress = true;
@@ -5460,7 +5460,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_EXTSCAN_STOP_CMDID)) {
WMA_LOGE("%s: failed to command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
wma->interfaces[pstopcmd->sessionId].extscan_in_progress = false;
@@ -5609,7 +5609,7 @@
if (wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID)) {
WMA_LOGE("%s: failed to send command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return QDF_STATUS_E_FAILURE;
}
index = index + min_entries;
@@ -5718,7 +5718,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID)) {
WMA_LOGE("%s: failed to command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -5849,7 +5849,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, buf, len,
WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID)) {
WMA_LOGE("%s: failed to send command", __func__);
- cdf_nbuf_free(buf);
+ qdf_nbuf_free(buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -5918,7 +5918,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID)) {
WMA_LOGE("%s: failed to command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -5972,7 +5972,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID)) {
WMA_LOGE("%s: failed to command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -6024,7 +6024,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_EXTSCAN_GET_CAPABILITIES_CMDID)) {
WMA_LOGE("%s: failed to command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
@@ -6531,7 +6531,7 @@
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_SCAN_PROB_REQ_OUI_CMDID)) {
WMA_LOGE("%s: failed to send command", __func__);
- cdf_nbuf_free(wmi_buf);
+ qdf_nbuf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
diff --git a/core/wma/src/wma_utils.c b/core/wma/src/wma_utils.c
index 6f6af9b..fd54008 100644
--- a/core/wma/src/wma_utils.c
+++ b/core/wma/src/wma_utils.c
@@ -45,7 +45,7 @@
#include "ol_txrx_ctrl_api.h"
#include "wlan_tgt_def_config.h"
-#include "cdf_nbuf.h"
+#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "ol_txrx_api.h"
#include "qdf_mem.h"
@@ -2172,7 +2172,7 @@
qdf_spin_lock_bh(&beacon->lock);
- buf_size = cdf_nbuf_len(beacon->buf);
+ buf_size = qdf_nbuf_len(beacon->buf);
buf = qdf_mem_malloc(buf_size);
if (!buf) {
@@ -2181,7 +2181,7 @@
return NULL;
}
- qdf_mem_copy(buf, cdf_nbuf_data(beacon->buf), buf_size);
+ qdf_mem_copy(buf, qdf_nbuf_data(beacon->buf), buf_size);
qdf_spin_unlock_bh(&beacon->lock);
diff --git a/core/wmi/wmi_unified.c b/core/wmi/wmi_unified.c
index b081a5d..3d1d7ae 100644
--- a/core/wmi/wmi_unified.c
+++ b/core/wmi/wmi_unified.c
@@ -177,7 +177,7 @@
return NULL;
}
- wmi_buf = cdf_nbuf_alloc_debug(NULL,
+ wmi_buf = qdf_nbuf_alloc_debug(NULL,
roundup(len + WMI_MIN_HEAD_ROOM, 4),
WMI_MIN_HEAD_ROOM, 4, false, file_name,
line_num);
@@ -186,19 +186,19 @@
return NULL;
/* Clear the wmi buffer */
- OS_MEMZERO(cdf_nbuf_data(wmi_buf), len);
+ OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
/*
* Set the length of the buffer to match the allocation size.
*/
- cdf_nbuf_set_pktlen(wmi_buf, len);
+ qdf_nbuf_set_pktlen(wmi_buf, len);
return wmi_buf;
}
void wmi_buf_free(wmi_buf_t net_buf)
{
- cdf_nbuf_free(net_buf);
+ qdf_nbuf_free(net_buf);
}
#else
wmi_buf_t wmi_buf_alloc(wmi_unified_t wmi_handle, uint16_t len)
@@ -210,24 +210,24 @@
return NULL;
}
- wmi_buf = cdf_nbuf_alloc(NULL, roundup(len + WMI_MIN_HEAD_ROOM, 4),
+ wmi_buf = qdf_nbuf_alloc(NULL, roundup(len + WMI_MIN_HEAD_ROOM, 4),
WMI_MIN_HEAD_ROOM, 4, false);
if (!wmi_buf)
return NULL;
/* Clear the wmi buffer */
- OS_MEMZERO(cdf_nbuf_data(wmi_buf), len);
+ OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
/*
* Set the length of the buffer to match the allocation size.
*/
- cdf_nbuf_set_pktlen(wmi_buf, len);
+ qdf_nbuf_set_pktlen(wmi_buf, len);
return wmi_buf;
}
void wmi_buf_free(wmi_buf_t net_buf)
{
- cdf_nbuf_free(net_buf);
+ qdf_nbuf_free(net_buf);
}
#endif
@@ -813,7 +813,7 @@
/* Do sanity check on the TLV parameter structure */
{
- void *buf_ptr = (void *)cdf_nbuf_data(buf);
+ void *buf_ptr = (void *)qdf_nbuf_data(buf);
if (wmitlv_check_command_tlv_params(NULL, buf_ptr, len, cmd_id)
!= 0) {
@@ -824,13 +824,13 @@
}
}
- if (cdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
+ if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
pr_err("%s, Failed to send cmd %x, no memory\n",
__func__, cmd_id);
return -ENOMEM;
}
- WMI_SET_FIELD(cdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
+ WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
qdf_atomic_inc(&wmi_handle->pending_cmds);
if (qdf_atomic_read(&wmi_handle->pending_cmds) >= WMI_MAX_CMDS) {
@@ -854,7 +854,7 @@
SET_HTC_PACKET_INFO_TX(pkt,
NULL,
- cdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
+ qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
/* htt_host_data_dl_len(buf)+20 */
wmi_handle->wmi_endpoint_id, htc_tag);
@@ -867,9 +867,9 @@
/*Record 16 bytes of WMI cmd data - exclude TLV and WMI headers */
if (cmd_id == WMI_MGMT_TX_SEND_CMDID) {
WMI_MGMT_COMMAND_RECORD(cmd_id,
- ((uint32_t *)cdf_nbuf_data(buf) + 2));
+ ((uint32_t *)qdf_nbuf_data(buf) + 2));
} else {
- WMI_COMMAND_RECORD(cmd_id, ((uint32_t *) cdf_nbuf_data(buf) +
+ WMI_COMMAND_RECORD(cmd_id, ((uint32_t *) qdf_nbuf_data(buf) +
2));
}
@@ -956,9 +956,9 @@
ASSERT(evt_buf != NULL);
- id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
+ id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
- if (cdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
+ if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
goto end;
idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
@@ -968,15 +968,15 @@
goto end;
}
- event = cdf_nbuf_data(evt_buf);
- len = cdf_nbuf_len(evt_buf);
+ event = qdf_nbuf_data(evt_buf);
+ len = qdf_nbuf_len(evt_buf);
/* Call the WMI registered event handler */
status = wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
event, len);
end:
- cdf_nbuf_free(evt_buf);
+ qdf_nbuf_free(evt_buf);
return status;
}
#endif /* 0 */
@@ -1024,15 +1024,15 @@
uint8_t *data;
evt_buf = (wmi_buf_t) htc_packet->pPktContext;
- id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
- data = cdf_nbuf_data(evt_buf);
+ id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
+ data = qdf_nbuf_data(evt_buf);
qdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
/* Exclude 4 bytes of TLV header */
WMI_RX_EVENT_RECORD(id, ((uint8_t *) data + 4));
qdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
qdf_spin_lock_bh(&wmi_handle->eventq_lock);
- cdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
+ qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
schedule_work(&wmi_handle->rx_event_work);
return;
@@ -1049,7 +1049,7 @@
uint32_t id;
evt_buf = (wmi_buf_t) htc_packet->pPktContext;
- id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
+ id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
switch (id) {
/*Event will be handled in tasklet ctx*/
case WMI_TX_PAUSE_EVENTID:
@@ -1090,13 +1090,13 @@
void *wmi_cmd_struct_ptr = NULL;
int tlv_ok_status = 0;
- id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
+ id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
- if (cdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
+ if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
goto end;
- data = cdf_nbuf_data(evt_buf);
- len = cdf_nbuf_len(evt_buf);
+ data = qdf_nbuf_data(evt_buf);
+ len = qdf_nbuf_len(evt_buf);
/* Validate and pad(if necessary) the TLVs */
tlv_ok_status = wmitlv_check_and_pad_event_tlvs(wmi_handle->scn_handle,
@@ -1164,7 +1164,7 @@
}
end:
wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
- cdf_nbuf_free(evt_buf);
+ qdf_nbuf_free(evt_buf);
}
void wmi_rx_event_work(struct work_struct *work)
@@ -1174,12 +1174,12 @@
wmi_buf_t buf;
qdf_spin_lock_bh(&wmi->eventq_lock);
- buf = cdf_nbuf_queue_remove(&wmi->event_queue);
+ buf = qdf_nbuf_queue_remove(&wmi->event_queue);
qdf_spin_unlock_bh(&wmi->eventq_lock);
while (buf) {
__wmi_control_rx(wmi, buf);
qdf_spin_lock_bh(&wmi->eventq_lock);
- buf = cdf_nbuf_queue_remove(&wmi->event_queue);
+ buf = qdf_nbuf_queue_remove(&wmi->event_queue);
qdf_spin_unlock_bh(&wmi->eventq_lock);
}
}
@@ -1219,7 +1219,7 @@
qdf_atomic_init(&wmi_handle->is_target_suspended);
wmi_runtime_pm_init(wmi_handle);
qdf_spinlock_create(&wmi_handle->eventq_lock);
- cdf_nbuf_queue_init(&wmi_handle->event_queue);
+ qdf_nbuf_queue_init(&wmi_handle->event_queue);
#ifdef CONFIG_CNSS
cnss_init_work(&wmi_handle->rx_event_work, wmi_rx_event_work);
#else
@@ -1238,10 +1238,10 @@
cds_flush_work(&wmi_handle->rx_event_work);
qdf_spin_lock_bh(&wmi_handle->eventq_lock);
- buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
+ buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
while (buf) {
- cdf_nbuf_free(buf);
- buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
+ qdf_nbuf_free(buf);
+ buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
}
qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
if (wmi_handle != NULL) {
@@ -1270,10 +1270,10 @@
"Enter: %s", __func__);
cds_flush_work(&wmi_handle->rx_event_work);
qdf_spin_lock_bh(&wmi_handle->eventq_lock);
- buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
+ buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
while (buf) {
- cdf_nbuf_free(buf);
- buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
+ qdf_nbuf_free(buf);
+ buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
}
qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_INFO,
@@ -1290,7 +1290,7 @@
ASSERT(wmi_cmd_buf);
#ifdef WMI_INTERFACE_EVENT_LOGGING
- cmd_id = WMI_GET_FIELD(cdf_nbuf_data(wmi_cmd_buf),
+ cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
WMI_CMD_HDR, COMMANDID);
#ifdef QCA_WIFI_3_0_EMU
@@ -1303,15 +1303,15 @@
- exclude TLV and WMI headers */
if (cmd_id == WMI_MGMT_TX_SEND_CMDID) {
WMI_MGMT_COMMAND_TX_CMP_RECORD(cmd_id,
- ((uint32_t *) cdf_nbuf_data(wmi_cmd_buf) + 2));
+ ((uint32_t *) qdf_nbuf_data(wmi_cmd_buf) + 2));
} else {
WMI_COMMAND_TX_CMP_RECORD(cmd_id,
- ((uint32_t *) cdf_nbuf_data(wmi_cmd_buf) + 2));
+ ((uint32_t *) qdf_nbuf_data(wmi_cmd_buf) + 2));
}
qdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
- cdf_nbuf_free(wmi_cmd_buf);
+ qdf_nbuf_free(wmi_cmd_buf);
qdf_mem_free(htc_pkt);
qdf_atomic_dec(&wmi_handle->pending_cmds);
}
diff --git a/core/wmi/wmi_unified_api.h b/core/wmi/wmi_unified_api.h
index f96c128..a995e04 100644
--- a/core/wmi/wmi_unified_api.h
+++ b/core/wmi/wmi_unified_api.h
@@ -38,8 +38,8 @@
#include "wmi.h"
#include "htc_api.h"
-typedef cdf_nbuf_t wmi_buf_t;
-#define wmi_buf_data(_buf) cdf_nbuf_data(_buf)
+typedef qdf_nbuf_t wmi_buf_t;
+#define wmi_buf_data(_buf) qdf_nbuf_data(_buf)
/**
* attach for unified WMI
diff --git a/core/wmi/wmi_unified_priv.h b/core/wmi/wmi_unified_priv.h
index 9c46dce..a508c82 100644
--- a/core/wmi/wmi_unified_priv.h
+++ b/core/wmi/wmi_unified_priv.h
@@ -39,7 +39,7 @@
#define WMI_UNIFIED_MAX_EVENT 0x100
#define WMI_MAX_CMDS 1024
-typedef cdf_nbuf_t wmi_buf_t;
+typedef qdf_nbuf_t wmi_buf_t;
#ifdef WMI_INTERFACE_EVENT_LOGGING
@@ -77,7 +77,7 @@
uint32_t max_event_idx;
void *htc_handle;
qdf_spinlock_t eventq_lock;
- cdf_nbuf_queue_t event_queue;
+ qdf_nbuf_queue_t event_queue;
struct work_struct rx_event_work;
#ifdef WLAN_OPEN_SOURCE
struct fwdebug dbglog;