/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#if !defined(WLAN_HDD_TX_RX_H)
#define WLAN_HDD_TX_RX_H

/**
 * DOC: wlan_hdd_tx_rx.h
 *
 * Linux HDD Tx/Rx APIs
 */

#include <wlan_hdd_includes.h>
#include <cds_api.h>
#include <linux/skbuff.h>
#include "cdp_txrx_flow_ctrl_legacy.h"

struct hdd_netif_queue_history;
struct hdd_context;

#define hdd_dp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_HDD_DATA, params)

#define hdd_dp_alert_rl(params...) \
	QDF_TRACE_FATAL_RL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_err_rl(params...) \
	QDF_TRACE_ERROR_RL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_warn_rl(params...) \
	QDF_TRACE_WARN_RL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_info_rl(params...) \
	QDF_TRACE_INFO_RL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_debug_rl(params...) \
	QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_HDD_DATA, params)

#define hdd_dp_enter() hdd_dp_debug("enter")
#define hdd_dp_enter_dev(dev) hdd_dp_debug("enter(%s)", (dev)->name)
#define hdd_dp_exit() hdd_dp_debug("exit")

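/*
 * Example (illustrative only): the hdd_dp_* wrappers above log against
 * QDF_MODULE_ID_HDD_DATA, so data-path helpers do not repeat the module id.
 * The function below is hypothetical.
 *
 *	static void hdd_dp_example(struct net_device *dev, int pkts)
 *	{
 *		hdd_dp_enter_dev(dev);
 *		hdd_dp_debug("processed %d packets", pkts);
 *		if (!pkts)
 *			hdd_dp_warn_rl("no packets to process");
 *		hdd_dp_exit();
 *	}
 */
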
#define HDD_ETHERTYPE_802_1_X 0x888E
#ifdef FEATURE_WLAN_WAPI
#define HDD_ETHERTYPE_WAI 0x88b4
#define IS_HDD_ETHERTYPE_WAI(_skb) (ntohs(_skb->protocol) == \
					HDD_ETHERTYPE_WAI)
#else
#define IS_HDD_ETHERTYPE_WAI(_skb) (false)
#endif

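/*
 * Example (illustrative only): IS_HDD_ETHERTYPE_WAI() lets the data path
 * special-case WAPI WAI handshake frames without open-coding the WAPI
 * #ifdef; when FEATURE_WLAN_WAPI is disabled it evaluates to false.
 * The snippet below is a hypothetical caller.
 *
 *	if (IS_HDD_ETHERTYPE_WAI(skb))
 *		hdd_dp_debug("WAI frame on %s", skb->dev->name);
 */
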
#define HDD_PSB_CFG_INVALID 0xFF
#define HDD_PSB_CHANGED 0xFF
#define SME_QOS_UAPSD_CFG_BK_CHANGED_MASK 0xF1
#define SME_QOS_UAPSD_CFG_BE_CHANGED_MASK 0xF2
#define SME_QOS_UAPSD_CFG_VI_CHANGED_MASK 0xF4
#define SME_QOS_UAPSD_CFG_VO_CHANGED_MASK 0xF8

netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
void hdd_tx_timeout(struct net_device *dev);

QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter);
QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter);

/**
 * hdd_rx_packet_cbk() - Receive packet handler
 * @adapter_context: pointer to HDD adapter context
 * @rxBuf: pointer to rx qdf_nbuf
 *
 * Receive callback registered with data path.  DP will call this to notify
 * the HDD when one or more packets were received for a registered
 * STA.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rxBuf);

/**
 * hdd_rx_deliver_to_stack() - HDD helper function to deliver RX pkts to stack
 * @adapter: pointer to HDD adapter context
 * @skb: pointer to skb
 *
 * The function calls the appropriate stack function depending upon the packet
 * type and whether GRO/LRO is enabled.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
				   struct sk_buff *skb);
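
/*
 * Example (illustrative only): a hypothetical per-packet rx helper that
 * hands each skb up through hdd_rx_deliver_to_stack() and logs a
 * rate-limited message when delivery fails.
 *
 *	static void hdd_rx_example_deliver(struct hdd_adapter *adapter,
 *					   struct sk_buff *skb)
 *	{
 *		if (QDF_IS_STATUS_ERROR(hdd_rx_deliver_to_stack(adapter, skb)))
 *			hdd_dp_debug_rl("stack delivery failed");
 *	}
 */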

/**
 * hdd_rx_pkt_thread_enqueue_cbk() - receive pkt handler to enqueue into thread
 * @adapter_context: pointer to HDD adapter context
 * @nbuf_list: pointer to qdf_nbuf list
 *
 * Receive callback registered with the DP layer which enqueues packets into
 * the DP rx thread.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter_context,
					 qdf_nbuf_t nbuf_list);

/**
 * hdd_rx_ol_init() - Initialize Rx offload mode (LRO or GRO)
 * @hdd_ctx: pointer to HDD context
 *
 * Return: 0 on success and non zero on failure.
 */
int hdd_rx_ol_init(struct hdd_context *hdd_ctx);
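
/*
 * Example (illustrative only): a hypothetical init-time caller that treats
 * an Rx offload setup failure as non-fatal and simply runs without LRO/GRO.
 *
 *	if (hdd_rx_ol_init(hdd_ctx))
 *		hdd_dp_err("Rx offload init failed, continuing without LRO/GRO");
 */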

/**
 * hdd_disable_rx_ol_in_concurrency() - Disable Rx offload due to concurrency
 * @disable: true/false to disable/enable the Rx offload
 *
 * Return: none
 */
void hdd_disable_rx_ol_in_concurrency(bool disable);

/**
 * hdd_disable_rx_ol_for_low_tput() - Disable Rx offload in low TPUT scenario
 * @hdd_ctx: hdd context
 * @disable: true/false to disable/enable the Rx offload
 *
 * Return: none
 */
void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable);
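
/*
 * Example (illustrative only): a hypothetical throughput monitor toggling
 * Rx offload as measured throughput crosses a threshold; tput_is_low is a
 * hypothetical flag, not part of this API.
 *
 *	hdd_disable_rx_ol_for_low_tput(hdd_ctx, tput_is_low);
 */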

/**
 * hdd_get_peer_sta_id() - Get the StationID using the Peer Mac address
 * @sta_ctx: pointer to HDD Station Context
 * @mac_address: pointer to Peer Mac address
 * @sta_id: pointer to returned Station Index
 *
 * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_FAILURE
 */
QDF_STATUS hdd_get_peer_sta_id(struct hdd_station_ctx *sta_ctx,
			       struct qdf_mac_addr *mac_address,
			       uint8_t *sta_id);
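
/*
 * Example (illustrative only): looking up a peer's station id before using
 * it; peer_mac is a hypothetical struct qdf_mac_addr variable.
 *
 *	uint8_t sta_id;
 *
 *	if (QDF_IS_STATUS_SUCCESS(hdd_get_peer_sta_id(sta_ctx, &peer_mac,
 *						      &sta_id)))
 *		hdd_dp_debug("peer mapped to sta_id %d", sta_id);
 */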

/**
 * hdd_reset_all_adapters_connectivity_stats() - reset connectivity stats
 * @hdd_ctx: pointer to HDD context
 *
 * Return: None
 */
void hdd_reset_all_adapters_connectivity_stats(struct hdd_context *hdd_ctx);

/**
 * hdd_tx_rx_collect_connectivity_stats_info() - collect connectivity stats
 * @skb: pointer to skb data
 * @adapter: pointer to vdev adapter
 * @action: action done on pkt.
 * @pkt_type: data pkt type
 *
 * Return: None
 */
void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
		void *adapter, enum connectivity_stats_pkt_status action,
		uint8_t *pkt_type);

/**
 * hdd_tx_queue_cb() - Disable/Enable the Transmit Queues
 * @hdd_handle: HDD handle
 * @vdev_id: vdev id
 * @action: Action to be taken on the Tx Queues
 * @reason: Reason for the netif action
 *
 * Return: None
 */
void hdd_tx_queue_cb(hdd_handle_t hdd_handle, uint32_t vdev_id,
		     enum netif_action_type action,
		     enum netif_reason_type reason);

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
void hdd_tx_resume_cb(void *adapter_context, bool tx_resume);

/**
 * hdd_tx_flow_control_is_pause() - Is TX Q paused by flow control
 * @adapter_context: pointer to vdev adapter
 *
 * Return: true if TX Q is paused by flow control
 */
bool hdd_tx_flow_control_is_pause(void *adapter_context);

/**
 * hdd_register_tx_flow_control() - Register TX Flow control
 * @adapter: adapter handle
 * @timer_callback: timer callback
 * @flow_control_fp: txrx flow control
 * @flow_control_is_pause: is txrx paused by flow control
 *
 * Return: none
 */
void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
		qdf_mc_timer_callback_t timer_callback,
		ol_txrx_tx_flow_control_fp flow_control_fp,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
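
/*
 * Example (illustrative only): wiring up legacy tx flow control with the
 * callbacks this header already declares; whether an adapter registers
 * exactly these callbacks is an assumption of the sketch.
 *
 *	hdd_register_tx_flow_control(adapter,
 *				     hdd_tx_resume_timer_expired_handler,
 *				     hdd_tx_resume_cb,
 *				     hdd_tx_flow_control_is_pause);
 */
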
void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter);
void hdd_get_tx_resource(struct hdd_adapter *adapter,
			 uint8_t STAId, uint16_t timer_value);

#else
static inline void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
{
}

static inline bool hdd_tx_flow_control_is_pause(void *adapter_context)
{
	return false;
}

static inline void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
		qdf_mc_timer_callback_t timer_callback,
		ol_txrx_tx_flow_control_fp flow_control_fp,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
{
}

static inline void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
{
}

static inline void hdd_get_tx_resource(struct hdd_adapter *adapter,
				       uint8_t STAId, uint16_t timer_value)
{
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
	defined(QCA_HL_NETDEV_FLOW_CONTROL)
void hdd_tx_resume_timer_expired_handler(void *adapter_context);
#else
static inline void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
}
#endif

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
void hdd_register_hl_netdev_fc_timer(struct hdd_adapter *adapter,
				     qdf_mc_timer_callback_t timer_callback);
void hdd_deregister_hl_netdev_fc_timer(struct hdd_adapter *adapter);
#else
static inline void hdd_register_hl_netdev_fc_timer(struct hdd_adapter *adapter,
						   qdf_mc_timer_callback_t
						   timer_callback)
{}

static inline void
hdd_deregister_hl_netdev_fc_timer(struct hdd_adapter *adapter)
{}
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

int hdd_get_peer_idx(struct hdd_station_ctx *sta_ctx,
		     struct qdf_mac_addr *addr);

const char *hdd_reason_type_to_string(enum netif_reason_type reason);
const char *hdd_action_type_to_string(enum netif_action_type action);

void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
		enum netif_action_type action, enum netif_reason_type reason);
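
/*
 * Example (illustrative only): pausing and resuming an adapter's netdev
 * queues from the control path.  WLAN_STOP_ALL_NETIF_QUEUE,
 * WLAN_START_ALL_NETIF_QUEUE and WLAN_CONTROL_PATH are assumed to be
 * members of the netif action/reason enums defined in the CDP headers.
 *
 *	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
 *				     WLAN_CONTROL_PATH);
 *	// ... reconfigure the adapter ...
 *	wlan_hdd_netif_queue_control(adapter, WLAN_START_ALL_NETIF_QUEUE,
 *				     WLAN_CONTROL_PATH);
 */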

#ifdef FEATURE_MONITOR_MODE_SUPPORT
int hdd_set_mon_rx_cb(struct net_device *dev);
#else
static inline
int hdd_set_mon_rx_cb(struct net_device *dev)
{
	return 0;
}
#endif

void hdd_send_rps_ind(struct hdd_adapter *adapter);
void hdd_send_rps_disable_ind(struct hdd_adapter *adapter);
void wlan_hdd_classify_pkt(struct sk_buff *skb);

#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx);
bool hdd_is_current_high_throughput(struct hdd_context *hdd_ctx);
#define HDD_MSM_CFG(msm_cfg) msm_cfg
#else
static inline void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx) {}
static inline bool hdd_is_current_high_throughput(struct hdd_context *hdd_ctx)
{
	return false;
}
#define HDD_MSM_CFG(msm_cfg) 0
#endif
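
/*
 * Example (illustrative only): HDD_MSM_CFG() lets bus-bandwidth related
 * config defaults collapse to 0 when WLAN_FEATURE_DP_BUS_BANDWIDTH is not
 * compiled in; CFG_EXAMPLE_BUS_BW_THRESHOLD is a hypothetical name.
 *
 *	#define CFG_EXAMPLE_BUS_BW_THRESHOLD HDD_MSM_CFG(9000)
 */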

#ifdef FEATURE_WLAN_DIAG_SUPPORT
void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir);
#else
static inline
void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
{}
#endif

/*
 * As of the 4.7 kernel, net_device->trans_start is removed. Create shims to
 * support compiling against older versions of the kernel.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
static inline void netif_trans_update(struct net_device *dev)
{
	dev->trans_start = jiffies;
}

#define TX_TIMEOUT_TRACE(dev, module_id) QDF_TRACE( \
	module_id, QDF_TRACE_LEVEL_ERROR, \
	"%s: Transmission timeout occurred jiffies %lu trans_start %lu", \
	__func__, jiffies, dev->trans_start)
#else
#define TX_TIMEOUT_TRACE(dev, module_id) QDF_TRACE( \
	module_id, QDF_TRACE_LEVEL_ERROR, \
	"%s: Transmission timeout occurred jiffies %lu", \
	__func__, jiffies)
#endif
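
/*
 * Example (illustrative only): a hypothetical tx-timeout handler using the
 * shims above to log the event against the HDD data module.
 *
 *	void hdd_tx_timeout_example(struct net_device *dev)
 *	{
 *		TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
 *		netif_trans_update(dev);
 *	}
 */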
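
/**
 * hdd_skb_fill_gso_size() - derive gso_size for a cloned, nonlinear TCP skb
 * @dev: network device whose MTU bounds the segment size
 * @skb: skb to update
 *
 * If a cloned, nonlinear TCP skb arrives with a gso_size of 0, compute one
 * from the device MTU minus the IP and TCP header lengths.
 */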
static inline void
hdd_skb_fill_gso_size(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_cloned(skb) && skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->gso_size == 0 &&
	    ip_hdr(skb)->protocol == IPPROTO_TCP) {
		skb_shinfo(skb)->gso_size = dev->mtu -
			((skb_transport_header(skb) - skb_network_header(skb))
			 + tcp_hdrlen(skb));
	}
}

/**
 * hdd_txrx_get_tx_ack_count() - get tx acked count
 * @adapter: Pointer to adapter
 *
 * Return: tx acked count
 */
uint32_t hdd_txrx_get_tx_ack_count(struct hdd_adapter *adapter);

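/**
 * hdd_skb_nontso_linearize() - linearize a nonlinear, non-TSO skb
 * @skb: skb to linearize
 *
 * A nonlinear skb that is not marked for TSO is linearized; for HL targets
 * (CONFIG_HL_SUPPORT) this is a no-op.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM if
 *	   skb_linearize() fails.
 */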
#ifdef CONFIG_HL_SUPPORT
static inline QDF_STATUS
hdd_skb_nontso_linearize(struct sk_buff *skb)
{
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
hdd_skb_nontso_linearize(struct sk_buff *skb)
{
	if (qdf_nbuf_is_nonlinear(skb) && qdf_nbuf_is_tso(skb) == false) {
		if (qdf_unlikely(skb_linearize(skb)))
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * hdd_dp_cfg_update() - update hdd config for HDD DP INIs
 * @psoc: Pointer to psoc obj
 * @hdd_ctx: Pointer to hdd context
 *
 * Return: None
 */
void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
		       struct hdd_context *hdd_ctx);

/**
 * hdd_print_netdev_txq_status() - print netdev tx queue status
 * @dev: Pointer to network device
 *
 * This function is used to print netdev tx queue status
 *
 * Return: None
 */
void hdd_print_netdev_txq_status(struct net_device *dev);

/**
 * wlan_hdd_dump_queue_history_state() - Dump hdd queue history states
 * @q_hist: pointer to hdd queue history structure
 * @buf: buffer where the queue history string is dumped
 * @size: size of the buffer
 *
 * Dump hdd queue history states into a buffer
 *
 * Return: number of bytes written to the buffer
 */
uint32_t
wlan_hdd_dump_queue_history_state(struct hdd_netif_queue_history *q_hist,
				  char *buf, uint32_t size);
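
/*
 * Example (illustrative only): dumping queue history into a caller-provided
 * buffer; q_hist is a hypothetical pointer obtained from the adapter.
 *
 *	char buf[512];
 *	uint32_t len;
 *
 *	len = wlan_hdd_dump_queue_history_state(q_hist, buf, sizeof(buf));
 *	hdd_dp_debug("wrote %u bytes of queue history", len);
 */
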
#endif /* end #if !defined(WLAN_HDD_TX_RX_H) */