/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wlan_hdd_tx_rx.c
 *
 * Linux HDD Tx/RX APIs
 */

/* denote that this file does not allow legacy hddLog */
#define HDD_DISALLOW_LEGACY_HDDLOG 1

#include <wlan_hdd_tx_rx.h>
#include <wlan_hdd_softap_tx_rx.h>
#include <wlan_hdd_napi.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/inetdevice.h>
#include <cds_sched.h>
#include <cds_utils.h>

#include <wlan_hdd_p2p.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include "sap_api.h"
#include "wlan_hdd_wmm.h"
#include "wlan_hdd_tdls.h"
#include "wlan_hdd_ocb.h"
#include "wlan_hdd_lro.h"
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_flow_ctrl_v2.h>
#include "wlan_hdd_nan_datapath.h"
#include "pld_common.h"
#include <cdp_txrx_misc.h>
#include "wlan_hdd_rx_monitor.h"
#include "wlan_hdd_power.h"
#include "wlan_hdd_cfg80211.h"
#include <wlan_hdd_tsf.h>
#include <net/tcp.h>
#include "wma_api.h"

#include "wlan_hdd_nud_tracking.h"

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
 * Mapping of the Linux AC interpretation to SME AC.
 * The host has 5 tx queues: 4 flow-controlled queues for regular traffic
 * and one non-flow-controlled queue for high priority control traffic
 * (EAPOL, DHCP). The fifth queue is mapped to AC_VO to allow for proper
 * prioritization.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
        SME_AC_VO,
        SME_AC_VI,
        SME_AC_BE,
        SME_AC_BK,
        SME_AC_VO,
};

#else
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
        SME_AC_VO,
        SME_AC_VI,
        SME_AC_BE,
        SME_AC_BK,
};

#endif
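
/*
 * Illustrative note (not part of the upstream table definitions): the Tx
 * path below indexes this table with the netdev queue chosen by the
 * select_queue callback, e.g.
 *
 *      ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];
 */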

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
 * @adapter_context: pointer to vdev adapter
 *
 * If the blocked OS Q is not resumed during the timeout period, resume the
 * OS Q forcefully to prevent a permanent stall.
 *
 * Return: None
 */
void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
        struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;

        if (!adapter) {
                /* INVALID ARG */
                return;
        }

        hdd_debug("Enabling queues");
        wlan_hdd_netif_queue_control(adapter, WLAN_WAKE_ALL_NETIF_QUEUE,
                                     WLAN_CONTROL_PATH);
}
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)

/**
 * hdd_tx_resume_false() - handle a false TX Q resume trigger by disabling
 *                         the OS queues
 * @adapter: pointer to hdd adapter
 * @tx_resume: TX Q resume trigger
 *
 * Return: None
 */
static void
hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
{
        if (true == tx_resume)
                return;

        /* Pause TX */
        hdd_debug("Disabling queues");
        wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
                                     WLAN_DATA_FLOW_CONTROL);

        if (QDF_TIMER_STATE_STOPPED ==
            qdf_mc_timer_get_current_state(&adapter->tx_flow_control_timer)) {
                QDF_STATUS status;

                status = qdf_mc_timer_start(&adapter->tx_flow_control_timer,
                                WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

                if (!QDF_IS_STATUS_SUCCESS(status))
                        hdd_err("Failed to start tx_flow_control_timer");
                else
                        adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
        }

        adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
        adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
}
#else

static inline void
hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
{
}
#endif

static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
                                             struct sk_buff *skb)
{
        struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
        int need_orphan = 0;

        if (adapter->tx_flow_low_watermark > 0) {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
                /*
                 * The TCP TX throttling logic changed a little after the
                 * 3.19-rc1 kernel: the TCP sending limit is smaller, which
                 * throttles the TCP packets handed to the host driver and
                 * makes TCP uplink throughput drop heavily. To fix this,
                 * orphan the socket buffer as soon as possible, which calls
                 * the skb's destructor to notify the TCP stack that the SKB
                 * buffer is unowned; the TCP stack will then pump more
                 * packets to the host driver.
                 *
                 * TX packets might be dropped in the UDP case (e.g. in
                 * iperf testing), so orphaning needs to be protected by
                 * flow control.
                 */
                need_orphan = 1;
#else
                if (hdd_ctx->config->tx_orphan_enable)
                        need_orphan = 1;
#endif
        } else if (hdd_ctx->config->tx_orphan_enable) {
                if (qdf_nbuf_is_ipv4_tcp_pkt(skb) ||
                    qdf_nbuf_is_ipv6_tcp_pkt(skb))
                        need_orphan = 1;
        }

        if (need_orphan) {
                skb_orphan(skb);
                ++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
        } else
                skb = skb_unshare(skb, GFP_ATOMIC);

        return skb;
}

/**
 * hdd_tx_resume_cb() - Resume OS TX Q.
 * @adapter_context: pointer to vdev adapter
 * @tx_resume: TX Q resume trigger
 *
 * Q was stopped due to WLAN TX path low resource condition
 *
 * Return: None
 */
void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
{
        struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
        struct hdd_station_ctx *hdd_sta_ctx = NULL;

        if (!adapter) {
                /* INVALID ARG */
                return;
        }

        hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);

        /* Resume TX */
        if (true == tx_resume) {
                if (QDF_TIMER_STATE_STOPPED !=
                    qdf_mc_timer_get_current_state(&adapter->
                                                   tx_flow_control_timer)) {
                        qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
                }
                hdd_debug("Enabling queues");
                wlan_hdd_netif_queue_control(adapter,
                                             WLAN_WAKE_ALL_NETIF_QUEUE,
                                             WLAN_DATA_FLOW_CONTROL);
        }
        hdd_tx_resume_false(adapter, tx_resume);
}

bool hdd_tx_flow_control_is_pause(void *adapter_context)
{
        struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;

        if ((NULL == adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
                /* INVALID ARG */
                hdd_err("invalid adapter %pK", adapter);
                return false;
        }

        return adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL);
}

void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
                qdf_mc_timer_callback_t timer_callback,
                ol_txrx_tx_flow_control_fp flow_control_fp,
                ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause_fp)
{
        if (adapter->tx_flow_timer_initialized == false) {
                qdf_mc_timer_init(&adapter->tx_flow_control_timer,
                                  QDF_TIMER_TYPE_SW,
                                  timer_callback,
                                  adapter);
                adapter->tx_flow_timer_initialized = true;
        }
        cdp_fc_register(cds_get_context(QDF_MODULE_ID_SOC),
                        adapter->session_id, flow_control_fp, adapter,
                        flow_control_is_pause_fp);
}

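/*
 * Usage sketch (illustrative only, wired from the callbacks defined in this
 * file): an adapter would typically register the legacy flow-control hooks
 * as
 *
 *      hdd_register_tx_flow_control(adapter,
 *                                   hdd_tx_resume_timer_expired_handler,
 *                                   hdd_tx_resume_cb,
 *                                   hdd_tx_flow_control_is_pause);
 */
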
/**
 * hdd_deregister_tx_flow_control() - Deregister TX Flow control
 * @adapter: adapter handle
 *
 * Return: none
 */
void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
{
        cdp_fc_deregister(cds_get_context(QDF_MODULE_ID_SOC),
                          adapter->session_id);
        if (adapter->tx_flow_timer_initialized == true) {
                qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
                qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
                adapter->tx_flow_timer_initialized = false;
        }
}

/**
 * hdd_get_tx_resource() - check tx resources and take action
 * @adapter: adapter handle
 * @STAId: station id
 * @timer_value: timer value
 *
 * Return: none
 */
void hdd_get_tx_resource(struct hdd_adapter *adapter,
                         uint8_t STAId, uint16_t timer_value)
{
        if (false ==
            cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC), STAId,
                                   adapter->tx_flow_low_watermark,
                                   adapter->tx_flow_high_watermark_offset)) {
                hdd_debug("Disabling queues lwm %d hwm offset %d",
                          adapter->tx_flow_low_watermark,
                          adapter->tx_flow_high_watermark_offset);
                wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
                                             WLAN_DATA_FLOW_CONTROL);
                if ((adapter->tx_flow_timer_initialized == true) &&
                    (QDF_TIMER_STATE_STOPPED ==
                     qdf_mc_timer_get_current_state(&adapter->
                                                    tx_flow_control_timer))) {
                        qdf_mc_timer_start(&adapter->tx_flow_control_timer,
                                           timer_value);
                        adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
                        adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
                        adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
                }
        }
}

#else
/**
 * hdd_skb_orphan() - skb_unshare a cloned packet, else skb_orphan
 * @adapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Return: pointer to skb structure
 */
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
                                             struct sk_buff *skb)
{
        struct sk_buff *nskb;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
        struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
#endif

        hdd_skb_fill_gso_size(adapter->dev, skb);

        nskb = skb_unshare(skb, GFP_ATOMIC);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
        if (unlikely(hdd_ctx->config->tx_orphan_enable) && (nskb == skb)) {
                /*
                 * For UDP packets we want to orphan the packet to allow the
                 * app to send more packets. The flow would ultimately be
                 * controlled by the limited number of tx descriptors for
                 * the vdev.
                 */
                ++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
                skb_orphan(skb);
        }
#endif
        return nskb;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

uint32_t hdd_txrx_get_tx_ack_count(struct hdd_adapter *adapter)
{
        return cdp_get_tx_ack_stats(cds_get_context(QDF_MODULE_ID_SOC),
                                    adapter->session_id);
}

/**
 * hdd_event_eapol_log() - send event to wlan diag
 * @skb: skb ptr
 * @dir: direction
 *
 * Return: None
 */
void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
{
        int16_t eapol_key_info;

        WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);

        if ((dir == QDF_TX &&
             (QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
              QDF_NBUF_CB_GET_PACKET_TYPE(skb))))
                return;
        else if (!qdf_nbuf_is_ipv4_eapol_pkt(skb))
                return;

        eapol_key_info = (uint16_t)(*(uint16_t *)
                                (skb->data + EAPOL_KEY_INFO_OFFSET));

        wlan_diag_event.event_sub_type =
                (dir == QDF_TX ?
                 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
                 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
        wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
                                (skb->data + EAPOL_PACKET_TYPE_OFFSET));
        wlan_diag_event.eapol_key_info = eapol_key_info;
        wlan_diag_event.eapol_rate = 0;
        qdf_mem_copy(wlan_diag_event.dest_addr,
                     (skb->data + QDF_NBUF_DEST_MAC_OFFSET),
                     sizeof(wlan_diag_event.dest_addr));
        qdf_mem_copy(wlan_diag_event.src_addr,
                     (skb->data + QDF_NBUF_SRC_MAC_OFFSET),
                     sizeof(wlan_diag_event.src_addr));

        WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
}

/**
 * wlan_hdd_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Return: none
 */
void wlan_hdd_classify_pkt(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;

        qdf_mem_set(skb->cb, sizeof(skb->cb), 0);

        /* check whether the destination mac address is broadcast/multicast */
        if (is_broadcast_ether_addr((uint8_t *)eh))
                QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
        else if (is_multicast_ether_addr((uint8_t *)eh))
                QDF_NBUF_CB_GET_IS_MCAST(skb) = true;

        if (qdf_nbuf_is_ipv4_arp_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_ARP;
        else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_DHCP;
        else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_EAPOL;
        else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_WAPI;
        else if (qdf_nbuf_is_icmp_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_ICMP;
        else if (qdf_nbuf_is_icmpv6_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_ICMPv6;
}

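/*
 * Example (illustrative, mirroring the hard_start_xmit path below): callers
 * read the classification back from the skb control block, e.g.
 *
 *      wlan_hdd_classify_pkt(skb);
 *      if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) == QDF_NBUF_CB_PACKET_TYPE_ARP)
 *              track_arp(skb);         // hypothetical bookkeeping hook
 */
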
/**
 * wlan_hdd_latency_opt() - latency option
 * @adapter: pointer to the adapter structure
 * @skb: pointer to sk buff
 *
 * Function to disable power save for icmp packets.
 *
 * Return: None
 */
#ifdef WLAN_ICMP_DISABLE_PS
static inline void
wlan_hdd_latency_opt(struct hdd_adapter *adapter, struct sk_buff *skb)
{
        struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);

        if (hdd_ctx->config->icmp_disable_ps_val <= 0)
                return;

        if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
            QDF_NBUF_CB_PACKET_TYPE_ICMP) {
                wlan_hdd_set_powersave(adapter, false,
                                       hdd_ctx->config->icmp_disable_ps_val);
                sme_ps_enable_auto_ps_timer(WLAN_HDD_GET_HAL_CTX(adapter),
                                adapter->session_id,
                                hdd_ctx->config->icmp_disable_ps_val);
        }
}
#else
static inline void
wlan_hdd_latency_opt(struct hdd_adapter *adapter, struct sk_buff *skb)
{
}
#endif

/**
 * hdd_get_transmit_sta_id() - function to retrieve the station id to be used
 * for sending traffic towards a particular destination address. The
 * destination address can be unicast, multicast or broadcast
 *
 * @adapter: Handle to adapter context
 * @skb: pointer to the OS packet whose destination address is examined
 * @station_id: station id
 *
 * Returns: None
 */
static void hdd_get_transmit_sta_id(struct hdd_adapter *adapter,
                                    struct sk_buff *skb, uint8_t *station_id)
{
        bool mcbc_addr = false;
        QDF_STATUS status;
        struct hdd_station_ctx *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
        struct qdf_mac_addr *dst_addr = NULL;

        dst_addr = (struct qdf_mac_addr *)skb->data;
        status = hdd_get_peer_sta_id(sta_ctx, dst_addr, station_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                if (QDF_NBUF_CB_GET_IS_BCAST(skb) ||
                    QDF_NBUF_CB_GET_IS_MCAST(skb)) {
                        hdd_debug("Received MC/BC packet for transmission");
                        mcbc_addr = true;
                }
        }

        if (adapter->device_mode == QDF_IBSS_MODE ||
            adapter->device_mode == QDF_NDI_MODE) {
                /*
                 * This check is necessary to make sure station id is not
                 * overwritten for UC traffic in IBSS or NDI mode
                 */
                if (mcbc_addr)
                        *station_id = sta_ctx->broadcast_staid;
        } else {
                /* For the rest, traffic is directed to AP/P2P GO */
                if (eConnectionState_Associated == sta_ctx->conn_info.connState)
                        *station_id = sta_ctx->conn_info.staId[0];
        }
}

/**
 * hdd_clear_tx_rx_connectivity_stats() - clear connectivity stats
 * @adapter: pointer to HDD adapter
 *
 * Return: None
 */
static void hdd_clear_tx_rx_connectivity_stats(struct hdd_adapter *adapter)
{
        hdd_info("Clear txrx connectivity stats");
        qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
                     sizeof(adapter->hdd_stats.hdd_arp_stats));
        qdf_mem_zero(&adapter->hdd_stats.hdd_dns_stats,
                     sizeof(adapter->hdd_stats.hdd_dns_stats));
        qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats,
                     sizeof(adapter->hdd_stats.hdd_tcp_stats));
        qdf_mem_zero(&adapter->hdd_stats.hdd_icmpv4_stats,
                     sizeof(adapter->hdd_stats.hdd_icmpv4_stats));
        adapter->pkt_type_bitmap = 0;
        adapter->track_arp_ip = 0;
        qdf_mem_zero(adapter->dns_payload, adapter->track_dns_domain_len);
        adapter->track_dns_domain_len = 0;
        adapter->track_src_port = 0;
        adapter->track_dest_port = 0;
        adapter->track_dest_ipv4 = 0;
}

void hdd_reset_all_adapters_connectivity_stats(struct hdd_context *hdd_ctx)
{
        struct hdd_adapter *adapter = NULL, *pNext = NULL;
        QDF_STATUS status;

        hdd_enter();

        status = hdd_get_front_adapter(hdd_ctx, &adapter);

        while (NULL != adapter && QDF_STATUS_SUCCESS == status) {
                hdd_clear_tx_rx_connectivity_stats(adapter);
                status = hdd_get_next_adapter(hdd_ctx, adapter, &pNext);
                adapter = pNext;
        }

        hdd_exit();
}

/**
 * hdd_is_tx_allowed() - check if Tx is allowed based on current peer state
 * @skb: pointer to OS packet (sk_buff)
 * @peer_id: Peer STA ID in peer table
 *
 * This function gets the peer state from DP and checks if it is either
 * OL_TXRX_PEER_STATE_CONN or OL_TXRX_PEER_STATE_AUTH. Only EAPOL (and WAPI)
 * packets are allowed when peer_state is OL_TXRX_PEER_STATE_CONN. All
 * packets are allowed when peer_state is OL_TXRX_PEER_STATE_AUTH.
 *
 * Return: true if Tx is allowed and false otherwise.
 */
static inline bool hdd_is_tx_allowed(struct sk_buff *skb, uint8_t peer_id)
{
        enum ol_txrx_peer_state peer_state;
        void *soc = cds_get_context(QDF_MODULE_ID_SOC);
        void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        void *peer;

        QDF_BUG(soc);
        QDF_BUG(pdev);

        peer = cdp_peer_find_by_local_id(soc, pdev, peer_id);

        if (peer == NULL) {
                hdd_err_rl("Unable to find peer entry for staid: %d", peer_id);
                return false;
        }

        peer_state = cdp_peer_state_get(soc, peer);
        if (likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
                return true;
        if (OL_TXRX_PEER_STATE_CONN == peer_state &&
            (ntohs(skb->protocol) == HDD_ETHERTYPE_802_1_X ||
             IS_HDD_ETHERTYPE_WAI(skb)))
                return true;
        QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                  FL("Invalid peer state for Tx: %d"), peer_state);
        return false;
}

/**
 * hdd_tx_rx_is_dns_domain_name_match() - check whether the DNS domain name
 * in the received skb matches the tracked DNS domain name
 * @skb: pointer to skb
 * @adapter: pointer to adapter
 *
 * Returns: true if it matches, else false
 */
static bool hdd_tx_rx_is_dns_domain_name_match(struct sk_buff *skb,
                                               struct hdd_adapter *adapter)
{
        uint8_t *domain_name;

        if (adapter->track_dns_domain_len == 0)
                return false;

        domain_name = qdf_nbuf_get_dns_domain_name(skb,
                                        adapter->track_dns_domain_len);
        if (strncmp(domain_name, adapter->dns_payload,
                    adapter->track_dns_domain_len) == 0)
                return true;
        else
                return false;
}

void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
                void *context,
                enum connectivity_stats_pkt_status action,
                uint8_t *pkt_type)
{
        uint32_t pkt_type_bitmap;
        struct hdd_adapter *adapter = NULL;

        adapter = (struct hdd_adapter *)context;
        if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
                          "Magic cookie(%x) for adapter sanity verification is invalid",
                          adapter->magic);
                return;
        }

        /* ARP tracking is done already. */
        pkt_type_bitmap = adapter->pkt_type_bitmap;
        pkt_type_bitmap &= ~CONNECTIVITY_CHECK_SET_ARP;

        if (!pkt_type_bitmap)
                return;

        switch (action) {
        case PKT_TYPE_REQ:
        case PKT_TYPE_TX_HOST_FW_SENT:
                if (qdf_nbuf_is_icmp_pkt(skb)) {
                        if (qdf_nbuf_data_is_icmpv4_req(skb) &&
                            (adapter->track_dest_ipv4 ==
                             qdf_nbuf_get_icmpv4_tgt_ip(skb))) {
                                *pkt_type = CONNECTIVITY_CHECK_SET_ICMPV4;
                                if (action == PKT_TYPE_REQ) {
                                        ++adapter->hdd_stats.hdd_icmpv4_stats.
                                                        tx_icmpv4_req_count;
                                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                                  "%s : ICMPv4 Req packet",
                                                  __func__);
                                } else
                                        /* host receives tx completion */
                                        ++adapter->hdd_stats.hdd_icmpv4_stats.
                                                        tx_host_fw_sent;
                        }
                } else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
                        if (qdf_nbuf_data_is_tcp_syn(skb) &&
                            (adapter->track_dest_port ==
                             qdf_nbuf_data_get_tcp_dst_port(skb))) {
                                *pkt_type = CONNECTIVITY_CHECK_SET_TCP_SYN;
                                if (action == PKT_TYPE_REQ) {
                                        ++adapter->hdd_stats.hdd_tcp_stats.
                                                        tx_tcp_syn_count;
                                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                                  "%s : TCP Syn packet",
                                                  __func__);
                                } else
                                        /* host receives tx completion */
                                        ++adapter->hdd_stats.hdd_tcp_stats.
                                                        tx_tcp_syn_host_fw_sent;
                        } else if ((adapter->hdd_stats.hdd_tcp_stats.
                                    is_tcp_syn_ack_rcv || adapter->hdd_stats.
                                    hdd_tcp_stats.is_tcp_ack_sent) &&
                                   qdf_nbuf_data_is_tcp_ack(skb) &&
                                   (adapter->track_dest_port ==
                                    qdf_nbuf_data_get_tcp_dst_port(skb))) {
                                *pkt_type = CONNECTIVITY_CHECK_SET_TCP_ACK;
                                if (action == PKT_TYPE_REQ &&
                                    adapter->hdd_stats.hdd_tcp_stats.
                                                is_tcp_syn_ack_rcv) {
                                        ++adapter->hdd_stats.hdd_tcp_stats.
                                                        tx_tcp_ack_count;
                                        adapter->hdd_stats.hdd_tcp_stats.
                                                is_tcp_syn_ack_rcv = false;
                                        adapter->hdd_stats.hdd_tcp_stats.
                                                is_tcp_ack_sent = true;
                                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                                  "%s : TCP Ack packet",
                                                  __func__);
                                } else if (action == PKT_TYPE_TX_HOST_FW_SENT &&
                                           adapter->hdd_stats.hdd_tcp_stats.
                                                        is_tcp_ack_sent) {
                                        /* host receives tx completion */
                                        ++adapter->hdd_stats.hdd_tcp_stats.
                                                        tx_tcp_ack_host_fw_sent;
                                        adapter->hdd_stats.hdd_tcp_stats.
                                                        is_tcp_ack_sent = false;
                                }
                        }
                } else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
                        if (qdf_nbuf_data_is_dns_query(skb) &&
                            hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
                                *pkt_type = CONNECTIVITY_CHECK_SET_DNS;
                                if (action == PKT_TYPE_REQ) {
                                        ++adapter->hdd_stats.hdd_dns_stats.
                                                        tx_dns_req_count;
                                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                                  "%s : DNS query packet",
                                                  __func__);
                                } else
                                        /* host receives tx completion */
                                        ++adapter->hdd_stats.hdd_dns_stats.
                                                        tx_host_fw_sent;
                        }
                }
                break;

        case PKT_TYPE_RSP:
                if (qdf_nbuf_is_icmp_pkt(skb)) {
                        if (qdf_nbuf_data_is_icmpv4_rsp(skb) &&
                            (adapter->track_dest_ipv4 ==
                             qdf_nbuf_get_icmpv4_src_ip(skb))) {
                                ++adapter->hdd_stats.hdd_icmpv4_stats.
                                                rx_icmpv4_rsp_count;
                                *pkt_type =
                                        CONNECTIVITY_CHECK_SET_ICMPV4;
                                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                          QDF_TRACE_LEVEL_INFO_HIGH,
                                          "%s : ICMPv4 Res packet", __func__);
                        }
                } else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
                        if (qdf_nbuf_data_is_tcp_syn_ack(skb) &&
                            (adapter->track_dest_port ==
                             qdf_nbuf_data_get_tcp_src_port(skb))) {
                                ++adapter->hdd_stats.hdd_tcp_stats.
                                                rx_tcp_syn_ack_count;
                                adapter->hdd_stats.hdd_tcp_stats.
                                        is_tcp_syn_ack_rcv = true;
                                *pkt_type =
                                        CONNECTIVITY_CHECK_SET_TCP_SYN_ACK;
                                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                          QDF_TRACE_LEVEL_INFO_HIGH,
                                          "%s : TCP Syn ack packet", __func__);
                        }
                } else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
                        if (qdf_nbuf_data_is_dns_response(skb) &&
                            hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
                                ++adapter->hdd_stats.hdd_dns_stats.
                                                rx_dns_rsp_count;
                                *pkt_type = CONNECTIVITY_CHECK_SET_DNS;
                                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                          QDF_TRACE_LEVEL_INFO_HIGH,
                                          "%s : DNS response packet", __func__);
                        }
                }
                break;

        case PKT_TYPE_TX_DROPPED:
                switch (*pkt_type) {
                case CONNECTIVITY_CHECK_SET_ICMPV4:
                        ++adapter->hdd_stats.hdd_icmpv4_stats.tx_dropped;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : ICMPv4 Req packet dropped", __func__);
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_SYN:
                        ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_dropped;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : TCP syn packet dropped", __func__);
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_ACK:
                        ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_dropped;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : TCP ack packet dropped", __func__);
                        break;
                case CONNECTIVITY_CHECK_SET_DNS:
                        ++adapter->hdd_stats.hdd_dns_stats.tx_dropped;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : DNS query packet dropped", __func__);
                        break;
                default:
                        break;
                }
                break;
        case PKT_TYPE_RX_DELIVERED:
                switch (*pkt_type) {
                case CONNECTIVITY_CHECK_SET_ICMPV4:
                        ++adapter->hdd_stats.hdd_icmpv4_stats.rx_delivered;
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
                        ++adapter->hdd_stats.hdd_tcp_stats.rx_delivered;
                        break;
                case CONNECTIVITY_CHECK_SET_DNS:
                        ++adapter->hdd_stats.hdd_dns_stats.rx_delivered;
                        break;
                default:
                        break;
                }
                break;
        case PKT_TYPE_RX_REFUSED:
                switch (*pkt_type) {
                case CONNECTIVITY_CHECK_SET_ICMPV4:
                        ++adapter->hdd_stats.hdd_icmpv4_stats.rx_refused;
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
                        ++adapter->hdd_stats.hdd_tcp_stats.rx_refused;
                        break;
                case CONNECTIVITY_CHECK_SET_DNS:
                        ++adapter->hdd_stats.hdd_dns_stats.rx_refused;
                        break;
                default:
                        break;
                }
                break;
        case PKT_TYPE_TX_ACK_CNT:
                switch (*pkt_type) {
                case CONNECTIVITY_CHECK_SET_ICMPV4:
                        ++adapter->hdd_stats.hdd_icmpv4_stats.tx_ack_cnt;
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_SYN:
                        ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_ack_cnt;
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_ACK:
                        ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_ack_cnt;
                        break;
                case CONNECTIVITY_CHECK_SET_DNS:
                        ++adapter->hdd_stats.hdd_dns_stats.tx_ack_cnt;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
}

/**
 * __hdd_hard_start_xmit() - Transmit a frame
 * @skb: pointer to OS packet (sk_buff)
 * @dev: pointer to network device
 *
 * Function registered with the Linux OS for transmitting
 * packets. This version of the function directly passes
 * the packet to the Transport Layer.
 * In case of any packet drop or error, log the error with
 * INFO HIGH/LOW/MEDIUM to avoid excessive logging in kmsg.
 *
 * Return: Always returns NETDEV_TX_OK
 */
static netdev_tx_t __hdd_hard_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
{
        QDF_STATUS status;
        sme_ac_enum_type ac;
        enum sme_qos_wmmuptype up;
        struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
        bool granted;
        uint8_t STAId;
        struct hdd_station_ctx *sta_ctx = &adapter->session.station;
        struct qdf_mac_addr *mac_addr;
        uint8_t pkt_type = 0;
        struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
        bool is_arp = false;

#ifdef QCA_WIFI_FTM
        if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
#endif

        ++adapter->hdd_stats.tx_rx_stats.tx_called;
        adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;

        if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
            cds_is_load_or_unload_in_progress()) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "Recovery/(Un)load in progress, dropping the packet");
                goto drop_pkt;
        }

        wlan_hdd_classify_pkt(skb);
        if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) == QDF_NBUF_CB_PACKET_TYPE_ARP) {
                is_arp = true;
                if (qdf_nbuf_data_is_arp_req(skb) &&
                    (hdd_ctx->track_arp_ip == qdf_nbuf_get_arp_tgt_ip(skb))) {
                        ++adapter->hdd_stats.hdd_arp_stats.tx_arp_req_count;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : ARP packet", __func__);
                }
        }
        /* track connectivity stats */
        if (adapter->pkt_type_bitmap)
                hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
                                                PKT_TYPE_REQ, &pkt_type);

        if (cds_is_driver_recovering()) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_WARN,
                          "Recovery in progress, dropping the packet");
                goto drop_pkt;
        }

        STAId = HDD_WLAN_INVALID_STA_ID;

        hdd_get_transmit_sta_id(adapter, skb, &STAId);
        if (STAId >= WLAN_MAX_STA_COUNT) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "Invalid station id, transmit operation suspended");
                goto drop_pkt;
        }

        hdd_get_tx_resource(adapter, STAId,
                            WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

        /* Get TL AC corresponding to Qdisc queue index/AC. */
        ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];

        if (!qdf_nbuf_ipa_owned_get(skb)) {
                skb = hdd_skb_orphan(adapter, skb);
                if (!skb)
                        goto drop_pkt_accounting;
        }

        /*
         * Add SKB to internal tracking table before further processing
         * in WLAN driver.
         */
        qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__);

        /*
         * user priority from IP header, which is already extracted and set
         * from the select_queue call back function
         */
        up = skb->priority;

        ++adapter->hdd_stats.tx_rx_stats.tx_classified_ac[ac];
#ifdef HDD_WMM_DEBUG
        QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
                  "%s: Classified as ac %d up %d", __func__, ac, up);
#endif /* HDD_WMM_DEBUG */

        if (HDD_PSB_CHANGED == adapter->psb_changed) {
                /*
                 * Function which determines whether acquiring admittance
                 * for a WMM AC is required or not, based on the psb
                 * configuration done in the framework
                 */
                hdd_wmm_acquire_access_required(adapter, ac);
        }
        /*
         * Make sure we already have access to this access category
         * or it is an EAPOL or WAPI frame during initial authentication,
         * which can have an artificially boosted higher qos priority.
         */

        if (((adapter->psb_changed & (1 << ac)) &&
             likely(adapter->hdd_wmm_status.wmmAcStatus[ac].
                        wmmAcAccessAllowed)) ||
            ((sta_ctx->conn_info.uIsAuthenticated == false) &&
             (QDF_NBUF_CB_PACKET_TYPE_EAPOL ==
              QDF_NBUF_CB_GET_PACKET_TYPE(skb) ||
              QDF_NBUF_CB_PACKET_TYPE_WAPI ==
              QDF_NBUF_CB_GET_PACKET_TYPE(skb)))) {
                granted = true;
        } else {
                status = hdd_wmm_acquire_access(adapter, ac, &granted);
                adapter->psb_changed |= (1 << ac);
        }

        if (!granted) {
                bool isDefaultAc = false;
                /*
                 * ADDTS request for this AC is sent, for now
                 * send this packet through next available lower
                 * Access category until ADDTS negotiation completes.
                 */
                while (!likely
                               (adapter->hdd_wmm_status.wmmAcStatus[ac].
                               wmmAcAccessAllowed)) {
                        switch (ac) {
                        case SME_AC_VO:
                                ac = SME_AC_VI;
                                up = SME_QOS_WMM_UP_VI;
                                break;
                        case SME_AC_VI:
                                ac = SME_AC_BE;
                                up = SME_QOS_WMM_UP_BE;
                                break;
                        case SME_AC_BE:
                                ac = SME_AC_BK;
                                up = SME_QOS_WMM_UP_BK;
                                break;
                        default:
                                ac = SME_AC_BK;
                                up = SME_QOS_WMM_UP_BK;
                                isDefaultAc = true;
                                break;
                        }
                        if (isDefaultAc)
                                break;
                }
                skb->priority = up;
                skb->queue_mapping = hdd_linux_up_to_ac_map[up];
        }

        adapter->stats.tx_bytes += skb->len;

        mac_addr = (struct qdf_mac_addr *)skb->data;

        ucfg_tdls_update_tx_pkt_cnt(adapter->hdd_vdev, mac_addr);

        if (qdf_nbuf_is_tso(skb))
                adapter->stats.tx_packets += qdf_nbuf_get_tso_num_seg(skb);
        else
                ++adapter->stats.tx_packets;

        hdd_event_eapol_log(skb, QDF_TX);
        QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
        QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);

        qdf_dp_trace_set_track(skb, QDF_TX);

        DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(skb),
                             sizeof(qdf_nbuf_data(skb)),
                             QDF_TX));

        if (!hdd_is_tx_allowed(skb, STAId)) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          FL("Tx not allowed for sta_id: %d"), STAId);
                ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
                goto drop_pkt_and_release_skb;
        }

        /* check whether the skb needs linearizing, e.g. non-linear udp data */
        if (hdd_skb_nontso_linearize(skb) != QDF_STATUS_SUCCESS) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                          QDF_TRACE_LEVEL_INFO_HIGH,
                          "%s: skb %pK linearize failed. drop the pkt",
                          __func__, skb);
                ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
                goto drop_pkt_and_release_skb;
        }

        /*
         * If a transmit function is not registered, drop packet
         */
        if (!adapter->tx_fn) {
                QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "%s: TX function not registered by the data path",
                          __func__);
                ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
                goto drop_pkt_and_release_skb;
        }

        if (adapter->tx_fn(adapter->txrx_vdev,
                           (qdf_nbuf_t) skb) != NULL) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "%s: Failed to send packet to txrx for staid: %d",
                          __func__, STAId);
                ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
                goto drop_pkt_and_release_skb;
        }

        netif_trans_update(dev);

        return NETDEV_TX_OK;

drop_pkt_and_release_skb:
        qdf_net_buf_debug_release_skb(skb);
drop_pkt:

        if (skb) {
                /* track connectivity stats */
                if (adapter->pkt_type_bitmap)
                        hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
                                        PKT_TYPE_TX_DROPPED, &pkt_type);
                qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
                                      QDF_DP_TRACE_DROP_PACKET_RECORD, 0,
                                      QDF_TX);
                kfree_skb(skb);
                skb = NULL;
        }

drop_pkt_accounting:

        ++adapter->stats.tx_dropped;
        ++adapter->hdd_stats.tx_rx_stats.tx_dropped;
        if (is_arp) {
                ++adapter->hdd_stats.hdd_arp_stats.tx_dropped;
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "%s : ARP packet dropped", __func__);
        }

        return NETDEV_TX_OK;
}

/**
 * hdd_hard_start_xmit() - Wrapper function to protect
 * __hdd_hard_start_xmit from SSR
 * @skb: pointer to OS packet
 * @dev: pointer to net_device structure
 *
 * Function called by the OS when any packet needs to be transmitted.
 *
 * Return: Always returns NETDEV_TX_OK
 */
netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        netdev_tx_t ret;

        cds_ssr_protect(__func__);
        ret = __hdd_hard_start_xmit(skb, dev);
        cds_ssr_unprotect(__func__);

        return ret;
}

/**
 * hdd_get_peer_sta_id() - Get the StationID using the Peer Mac address
 * @sta_ctx: pointer to HDD Station Context
 * @pMacAddress: pointer to Peer Mac address
 * @staId: pointer to returned Station Index
 *
 * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_FAILURE
 */
QDF_STATUS hdd_get_peer_sta_id(struct hdd_station_ctx *sta_ctx,
                               struct qdf_mac_addr *pMacAddress, uint8_t *staId)
{
        uint8_t idx;

        for (idx = 0; idx < MAX_PEERS; idx++) {
                if (!qdf_mem_cmp(&sta_ctx->conn_info.peerMacAddress[idx],
                                 pMacAddress, QDF_MAC_ADDR_SIZE)) {
                        *staId = sta_ctx->conn_info.staId[idx];
                        return QDF_STATUS_SUCCESS;
                }
        }

        return QDF_STATUS_E_FAILURE;
}

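/*
 * Usage sketch (illustrative, mirroring hdd_get_transmit_sta_id() above):
 *
 *      struct qdf_mac_addr *dst = (struct qdf_mac_addr *)skb->data;
 *      uint8_t sta_id;
 *
 *      if (QDF_IS_STATUS_SUCCESS(hdd_get_peer_sta_id(sta_ctx, dst, &sta_id)))
 *              ... transmit using sta_id ...
 */
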
/**
 * __hdd_tx_timeout() - TX timeout handler
 * @dev: pointer to network device
 *
 * This function is registered as a netdev ndo_tx_timeout method, and
 * is invoked by the kernel if the driver takes too long to transmit a
 * frame.
 *
 * Return: None
 */
static void __hdd_tx_timeout(struct net_device *dev)
{
        struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
        struct hdd_context *hdd_ctx;
        struct netdev_queue *txq;
        void *soc = cds_get_context(QDF_MODULE_ID_SOC);
        u64 diff_jiffies;
        int i = 0;

        TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
        DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_HDD_TX_TIMEOUT,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             NULL, 0, QDF_TX));

        /* Getting here implies we disabled the TX queues for too
         * long. Queues are disabled either because of disassociation
         * or low resource scenarios. In case of disassociation it is
         * ok to ignore this. But if associated, we have to do possible
         * recovery here.
         */

        for (i = 0; i < NUM_TX_QUEUES; i++) {
                txq = netdev_get_tx_queue(dev, i);
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
                          "Queue: %d status: %d txq->trans_start: %lu",
                          i, netif_tx_queue_stopped(txq), txq->trans_start);
        }

        QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
                  "carrier state: %d", netif_carrier_ok(dev));
        hdd_ctx = WLAN_HDD_GET_CTX(adapter);
        wlan_hdd_display_netif_queue_history(hdd_ctx,
                                             QDF_STATS_VERBOSITY_LEVEL_HIGH);
        cdp_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));

Jeff Johnson6ced42c2017-10-20 12:48:11 -07001209 ++adapter->hdd_stats.tx_rx_stats.tx_timeout_cnt;
1210 ++adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301211
1212 diff_jiffies = jiffies -
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001213 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301214
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001215 if ((adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt > 1) &&
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301216 (diff_jiffies > (HDD_TX_TIMEOUT * 2))) {
1217 /*
1218 * In case when there is no traffic is running, it may
1219 * possible tx time-out may once happen and later system
1220 * recovered then continuous tx timeout count has to be
1221 * reset as it is gets modified only when traffic is running.
1222 * If over a period of time if this count reaches to threshold
1223 * then host triggers a false subsystem restart. In genuine
1224 * time out case kernel will call the tx time-out back to back
1225 * at interval of HDD_TX_TIMEOUT. Here now check if previous
1226 * TX TIME out has occurred more than twice of HDD_TX_TIMEOUT
1227 * back then host may recovered here from data stall.
1228 */
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001229 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301230 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnson9a27ffa2018-05-06 17:26:57 -07001231 "Reset continuous tx timeout stat");
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301232 }
1233
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001234 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout = jiffies;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301235
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001236 if (adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt >
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301237 HDD_TX_STALL_THRESHOLD) {
1238 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1239 "Data stall due to continuous TX timeouts");
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001240 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Poddar, Siddarth37033032017-10-11 15:47:40 +05301241 if (hdd_ctx->config->enable_data_stall_det)
1242 cdp_post_data_stall_event(soc,
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301243 DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
1244 DATA_STALL_LOG_HOST_STA_TX_TIMEOUT,
1245 0xFF, 0xFF,
1246 DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
1247 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001248}
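
/*
 * Worked example of the stall detection above (a sketch, assuming
 * HDD_TX_TIMEOUT is 5 seconds worth of jiffies and HDD_TX_STALL_THRESHOLD
 * is a small count such as 5): in a genuine stall the kernel watchdog
 * fires back to back roughly every HDD_TX_TIMEOUT, so diff_jiffies stays
 * at or below 2 * HDD_TX_TIMEOUT and cont_txtimeout_cnt climbs 1, 2,
 * 3, ... until it crosses the threshold and a data stall event is
 * posted. An isolated timeout followed by a long healthy period instead
 * arrives with diff_jiffies > 2 * HDD_TX_TIMEOUT, which resets
 * cont_txtimeout_cnt before it can reach the threshold.
 */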

/**
 * hdd_tx_timeout() - Wrapper function to protect __hdd_tx_timeout from SSR
 * @dev: pointer to net_device structure
 *
 * Function called by OS if there is any timeout during transmission.
 * Since HDD simply enqueues packets and returns control to the OS right
 * away, this should never be invoked.
 *
 * Return: none
 */
void hdd_tx_timeout(struct net_device *dev)
{
	cds_ssr_protect(__func__);
	__hdd_tx_timeout(dev);
	cds_ssr_unprotect(__func__);
}

/**
 * hdd_init_tx_rx() - Initialize Tx/Rx module
 * @adapter: pointer to adapter context
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (NULL == adapter) {
		hdd_err("adapter is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	return status;
}

/**
 * hdd_deinit_tx_rx() - Deinitialize Tx/Rx module
 * @adapter: pointer to adapter context
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (NULL == adapter) {
		hdd_err("adapter is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	return status;
}

#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * hdd_mon_rx_packet_cbk() - Receive callback registered with OL layer.
 * @context: [in] pointer to qdf context
 * @rxbuf: [in] pointer to rx qdf_nbuf
 *
 * TL will call this to notify the HDD when one or more packets were
 * received for a registered STA.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered, QDF_STATUS_SUCCESS
 * otherwise
 */
static QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
{
	struct hdd_adapter *adapter;
	int rxstat;
	struct sk_buff *skb;
	struct sk_buff *skb_next;
	unsigned int cpu_index;

	/* Sanity check on inputs */
	if ((NULL == context) || (NULL == rxbuf)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)context;
	if ((NULL == adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "invalid adapter %pK", adapter);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = wlan_hdd_get_cpu();

	/* walk the chain until all are processed */
	skb = (struct sk_buff *)rxbuf;
	while (NULL != skb) {
		skb_next = skb->next;
		skb->dev = adapter->dev;

		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
		++adapter->stats.rx_packets;
		adapter->stats.rx_bytes += skb->len;

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(skb);

		/*
		 * If this is not the last packet on the chain, just put
		 * the packet into the backlog queue without scheduling
		 * the RX softirq.
		 */
		if (skb->next) {
			rxstat = netif_rx(skb);
		} else {
			/*
			 * This is the last packet on the chain, so
			 * schedule the RX softirq.
			 */
			rxstat = netif_rx_ni(skb);
		}

		if (NET_RX_SUCCESS == rxstat)
			++adapter->
				hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
		else
			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];

		skb = skb_next;
	}

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * hdd_get_peer_idx() - Get the idx for given address in peer table
 * @sta_ctx: pointer to HDD Station Context
 * @addr: pointer to Peer Mac address
 *
 * Return: index on success, INVALID_PEER_IDX otherwise
 */
int hdd_get_peer_idx(struct hdd_station_ctx *sta_ctx,
		     struct qdf_mac_addr *addr)
{
	uint8_t idx;

	for (idx = 0; idx < MAX_PEERS; idx++) {
		if (sta_ctx->conn_info.staId[idx] == HDD_WLAN_INVALID_STA_ID)
			continue;
		if (qdf_mem_cmp(&sta_ctx->conn_info.peerMacAddress[idx],
				addr, sizeof(struct qdf_mac_addr)))
			continue;
		return idx;
	}

	return INVALID_PEER_IDX;
}

/**
 * hdd_is_mcast_replay() - checks if pkt is multicast replay
 * @skb: packet skb
 *
 * A replayed multicast packet is one of this device's own transmissions
 * echoed back by the network: it arrives as PACKET_MULTICAST yet carries
 * the device's own MAC address as the Ethernet source.
 *
 * Return: true if replayed multicast pkt, false otherwise
 */
static bool hdd_is_mcast_replay(struct sk_buff *skb)
{
	struct ethhdr *eth;

	eth = eth_hdr(skb);
	if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
		if (unlikely(ether_addr_equal(eth->h_source,
					      skb->dev->dev_addr)))
			return true;
	}
	return false;
}

/**
 * hdd_is_arp_local() - check whether an ARP request targets a local address
 * @skb: pointer to sk_buff
 *
 * Return: true for a local ARP request, false otherwise.
 */
static bool hdd_is_arp_local(struct sk_buff *skb)
{
	struct arphdr *arp;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	struct in_device *in_dev;
	unsigned char *arp_ptr;
	__be32 tip;

	arp = (struct arphdr *)skb->data;
	if (arp->ar_op == htons(ARPOP_REQUEST)) {
		in_dev = __in_dev_get_rtnl(skb->dev);
		if (in_dev) {
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next) {
				if (!strcmp(skb->dev->name, ifa->ifa_label))
					break;
			}
		}

		if (ifa && ifa->ifa_local) {
			/* step past the sender hw/IP and target hw fields
			 * to reach the target IP address
			 */
			arp_ptr = (unsigned char *)(arp + 1);
			arp_ptr += (skb->dev->addr_len + 4 +
				    skb->dev->addr_len);
			memcpy(&tip, arp_ptr, 4);
			hdd_debug("ARP packet: local IP: %x dest IP: %x",
				  ifa->ifa_local, tip);
			if (ifa->ifa_local == tip)
				return true;
		}
	}

	return false;
}
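
/*
 * Layout note for the pointer arithmetic in hdd_is_arp_local(): per
 * RFC 826, the ARP payload following struct arphdr is
 *
 *   sender hw addr [addr_len] | sender IP [4] |
 *   target hw addr [addr_len] | target IP [4]
 *
 * so stepping addr_len + 4 + addr_len bytes past the header lands on the
 * target IP. An equivalent struct view for Ethernet/IPv4, as an
 * illustrative sketch only (not used by the driver), would be:
 *
 *   struct arp_eth_ipv4_payload {
 *           unsigned char ar_sha[ETH_ALEN];   sender hardware address
 *           __be32 ar_sip;                    sender IP address
 *           unsigned char ar_tha[ETH_ALEN];   target hardware address
 *           __be32 ar_tip;                    target IP address
 *   } __packed;
 */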

/**
 * hdd_is_rx_wake_lock_needed() - check if wake lock is needed
 * @skb: pointer to sk_buff
 *
 * RX wake lock is needed for:
 * 1) Unicast data packet OR
 * 2) Local ARP data packet
 *
 * Return: true if wake lock is needed or false otherwise.
 */
static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb)
{
	if ((skb->pkt_type != PACKET_BROADCAST &&
	     skb->pkt_type != PACKET_MULTICAST) || hdd_is_arp_local(skb))
		return true;

	return false;
}

#ifdef RECEIVE_OFFLOAD
/**
 * hdd_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO
 * @hdd_ctx: pointer to HDD context
 *
 * Return: None
 */
static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
{
	if (!(hdd_ctx->config->lro_enable ^
	      hdd_ctx->config->gro_enable)) {
		hdd_ctx->config->lro_enable && hdd_ctx->config->gro_enable ?
		hdd_err("Can't enable both LRO and GRO, disabling Rx offload") :
		hdd_debug("LRO and GRO both are disabled");
		hdd_ctx->ol_enable = 0;
	} else if (hdd_ctx->config->lro_enable) {
		hdd_debug("Rx offload LRO is enabled");
		hdd_ctx->ol_enable = CFG_LRO_ENABLED;
	} else {
		hdd_debug("Rx offload GRO is enabled");
		hdd_ctx->ol_enable = CFG_GRO_ENABLED;
	}
}
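
/*
 * Decision table for the XOR test above, read as (lro_enable, gro_enable):
 * (0,0) and (1,1) fail the XOR and disable Rx offload entirely, the latter
 * with an error since the two methods are mutually exclusive; (1,0)
 * selects LRO and (0,1) selects GRO.
 */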

/**
 * hdd_gro_rx() - Handle Rx processing via GRO
 * @adapter: pointer to adapter context
 * @skb: pointer to sk_buff
 *
 * Return: QDF_STATUS_SUCCESS if processed via GRO, a non-zero return
 * code otherwise
 */
static QDF_STATUS hdd_gro_rx(struct hdd_adapter *adapter, struct sk_buff *skb)
{
	struct qca_napi_info *qca_napii;
	struct qca_napi_data *napid;
	struct napi_struct *napi_to_use;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	/* Only enabling it for STA mode like LRO today */
	if (QDF_STA_MODE != adapter->device_mode)
		return QDF_STATUS_E_NOSUPPORT;

	napid = hdd_napi_get_all();
	if (unlikely(napid == NULL))
		goto out;

	qca_napii = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(skb), napid);
	if (unlikely(qca_napii == NULL))
		goto out;

	skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
	/*
	 * As we are breaking context in Rx thread mode, there is an
	 * rx_thread NAPI corresponding to each hif NAPI.
	 */
	if (adapter->hdd_ctx->enable_rxthread)
		napi_to_use = &qca_napii->rx_thread_napi;
	else
		napi_to_use = &qca_napii->napi;

	local_bh_disable();
	napi_gro_receive(napi_to_use, skb);
	local_bh_enable();

	status = QDF_STATUS_SUCCESS;
out:

	return status;
}

/**
 * hdd_rxthread_napi_gro_flush() - GRO flush callback for NAPI+Rx_Thread Rx mode
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_rxthread_napi_gro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;

	local_bh_disable();
	/*
	 * As we are breaking context in Rx thread mode, there is an
	 * rx_thread NAPI corresponding to each hif NAPI.
	 */
	napi_gro_flush(&qca_napii->rx_thread_napi, false);
	local_bh_enable();
}

/**
 * hdd_hif_napi_gro_flush() - GRO flush callback for NAPI Rx mode
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_hif_napi_gro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;

	local_bh_disable();
	napi_gro_flush(&qca_napii->napi, false);
	local_bh_enable();
}

#ifdef FEATURE_LRO
/**
 * hdd_qdf_lro_flush() - LRO flush wrapper
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_qdf_lro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
	qdf_lro_ctx_t qdf_lro_ctx = qca_napii->lro_ctx;

	qdf_lro_flush(qdf_lro_ctx);
}
#else
static void hdd_qdf_lro_flush(void *data)
{
}
#endif

/**
 * hdd_register_rx_ol() - Register LRO/GRO Rx processing callbacks
 *
 * Return: none
 */
static void hdd_register_rx_ol(void)
{
	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!hdd_ctx) {
		hdd_err("HDD context is NULL");
		return;
	}

	hdd_ctx->en_tcp_delack_no_lro = 0;

	if (hdd_ctx->ol_enable == CFG_LRO_ENABLED) {
		cdp_register_rx_offld_flush_cb(soc, hdd_qdf_lro_flush);
		hdd_ctx->receive_offload_cb = hdd_lro_rx;
		hdd_debug("LRO is enabled");
	} else if (hdd_ctx->ol_enable == CFG_GRO_ENABLED) {
		if (hdd_ctx->enable_rxthread)
			cdp_register_rx_offld_flush_cb(soc,
						hdd_rxthread_napi_gro_flush);
		else
			cdp_register_rx_offld_flush_cb(soc,
						hdd_hif_napi_gro_flush);
		hdd_ctx->receive_offload_cb = hdd_gro_rx;
		hdd_debug("GRO is enabled");
	} else if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
		hdd_ctx->en_tcp_delack_no_lro = 1;
	}
}

int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
{
	struct cdp_lro_hash_config lro_config = {0};

	hdd_resolve_rx_ol_mode(hdd_ctx);

	hdd_register_rx_ol();

	/*
	 * This will enable flow steering and Toeplitz hash,
	 * so enable it for LRO or GRO processing.
	 */
	if (hdd_napi_enabled(HDD_NAPI_ANY) == 0) {
		hdd_warn("NAPI is disabled");
		return 0;
	}

	lro_config.lro_enable = 1;
	lro_config.tcp_flag = TCPHDR_ACK;
	lro_config.tcp_flag_mask = TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST |
		TCPHDR_ACK | TCPHDR_URG | TCPHDR_ECE | TCPHDR_CWR;

	get_random_bytes(lro_config.toeplitz_hash_ipv4,
			 (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
			  LRO_IPV4_SEED_ARR_SZ));

	get_random_bytes(lro_config.toeplitz_hash_ipv6,
			 (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
			  LRO_IPV6_SEED_ARR_SZ));

	if (0 != wma_lro_init(&lro_config)) {
		hdd_err("Failed to send LRO/GRO configuration!");
		hdd_ctx->ol_enable = 0;
		return -EAGAIN;
	}

	return 0;
}
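
/*
 * Note on the LRO flag configuration above: tcp_flag_mask selects which
 * TCP flag bits are examined and tcp_flag is the value they must match,
 * so with the settings used here only segments whose sole set flag is
 * ACK (no FIN/SYN/RST/URG/ECE/CWR) are eligible for aggregation.
 */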

void hdd_disable_rx_ol_in_concurrency(bool disable)
{
	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);

	if (!hdd_ctx) {
		hdd_err("hdd_ctx is NULL");
		return;
	}

	if (disable) {
		if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
			struct wlan_rx_tp_data rx_tp_data;

			hdd_info("Enable TCP delack as LRO disabled in concurrency");
			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
			rx_tp_data.level = GET_CUR_RX_LVL(hdd_ctx);
			wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
						    WLAN_SVC_WLAN_TP_IND,
						    &rx_tp_data,
						    sizeof(rx_tp_data));
			hdd_ctx->en_tcp_delack_no_lro = 1;
		}
		qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 1);
	} else {
		if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
			hdd_info("Disable TCP delack as LRO is enabled");
			hdd_ctx->en_tcp_delack_no_lro = 0;
			hdd_reset_tcp_delack(hdd_ctx);
		}
		qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 0);
	}
}

void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
{
	if (disable)
		qdf_atomic_set(&hdd_ctx->disable_lro_in_low_tput, 1);
	else
		qdf_atomic_set(&hdd_ctx->disable_lro_in_low_tput, 0);
}

/**
 * hdd_can_handle_receive_offload() - Check for dynamic disablement
 * @hdd_ctx: hdd context
 * @skb: pointer to sk_buff which will be processed by Rx OL
 *
 * Check for dynamic disablement of Rx offload
 *
 * Return: false if we cannot process, true otherwise
 */
static bool hdd_can_handle_receive_offload(struct hdd_context *hdd_ctx,
					   struct sk_buff *skb)
{
	if (!QDF_NBUF_CB_RX_TCP_PROTO(skb) ||
	    qdf_atomic_read(&hdd_ctx->disable_lro_in_concurrency) ||
	    QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) ||
	    qdf_atomic_read(&hdd_ctx->disable_lro_in_low_tput))
		return false;
	else
		return true;
}
#else /* RECEIVE_OFFLOAD */
static bool hdd_can_handle_receive_offload(struct hdd_context *hdd_ctx,
					   struct sk_buff *skb)
{
	return false;
}

int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
{
	hdd_err("Rx_OL, LRO/GRO not supported");
	return -EPERM;
}

void hdd_disable_rx_ol_in_concurrency(bool disable)
{
}

void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
{
}
#endif /* RECEIVE_OFFLOAD */

#ifdef WLAN_FEATURE_TSF_PLUS
static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
					qdf_nbuf_t netbuf,
					uint64_t target_time)
{
	if (!HDD_TSF_IS_RX_SET(hdd_ctx))
		return;

	hdd_rx_timestamp(netbuf, target_time);
}
#else
static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
					qdf_nbuf_t netbuf,
					uint64_t target_time)
{
}
#endif

/**
 * hdd_rx_packet_cbk() - Receive packet handler
 * @context: pointer to HDD adapter context
 * @rxBuf: pointer to rx qdf_nbuf
 *
 * Receive callback registered with TL. TL will call this to notify
 * the HDD when one or more packets were received for a registered
 * STA.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
{
	struct hdd_adapter *adapter = NULL;
	struct hdd_context *hdd_ctx = NULL;
	int rxstat = 0;
	QDF_STATUS rx_ol_status = QDF_STATUS_E_FAILURE;
	struct sk_buff *skb = NULL;
	struct sk_buff *next = NULL;
	struct hdd_station_ctx *sta_ctx = NULL;
	unsigned int cpu_index;
	struct qdf_mac_addr *mac_addr;
	bool wake_lock = false;
	uint8_t pkt_type = 0;
	bool track_arp = false;

	/* Sanity check on inputs */
	if (unlikely((NULL == context) || (NULL == rxBuf))) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)context;
	if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Magic cookie(%x) for adapter sanity verification is invalid",
			  adapter->magic);
		return QDF_STATUS_E_FAILURE;
	}

	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	if (unlikely(NULL == hdd_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: HDD context is Null", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = wlan_hdd_get_cpu();

	next = (struct sk_buff *)rxBuf;

	while (next) {
		skb = next;
		next = skb->next;
		skb->next = NULL;

#ifdef QCA_WIFI_QCA6290 /* Debug code, remove later */
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
			  "%s: skb %pK skb->len %d\n", __func__, skb, skb->len);
#endif
		if (QDF_NBUF_CB_PACKET_TYPE_ARP ==
		    QDF_NBUF_CB_GET_PACKET_TYPE(skb)) {
			if (qdf_nbuf_data_is_arp_rsp(skb) &&
			    (hdd_ctx->track_arp_ip ==
			     qdf_nbuf_get_arp_src_ip(skb))) {
				++adapter->hdd_stats.hdd_arp_stats.
					rx_arp_rsp_count;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO,
					  "%s: ARP packet received",
					  __func__);
				track_arp = true;
			}
		}
		/* track connectivity stats */
		if (adapter->pkt_type_bitmap)
			hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
						PKT_TYPE_RSP, &pkt_type);

		sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
		if ((sta_ctx->conn_info.proxyARPService) &&
		    cfg80211_is_gratuitous_arp_unsolicited_na(skb)) {
			uint32_t rx_dropped;

			rx_dropped = ++adapter->hdd_stats.tx_rx_stats.
							rx_dropped[cpu_index];
			/* rate limit error messages to 1/8th */
			if ((rx_dropped & 0x07) == 0)
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO,
					  "%s: Dropping HS 2.0 Gratuitous ARP or Unsolicited NA count=%u",
					  __func__, rx_dropped);
			/* Remove SKB from internal tracking table before
			 * submitting it to stack
			 */
			qdf_nbuf_free(skb);
			continue;
		}

		hdd_event_eapol_log(skb, QDF_RX);
		qdf_dp_trace_log_pkt(adapter->session_id, skb, QDF_RX,
				     QDF_TRACE_DEFAULT_PDEV_ID);

		DPTRACE(qdf_dp_trace(skb,
				     QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(skb),
				     sizeof(qdf_nbuf_data(skb)), QDF_RX));

		DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
					      QDF_DP_TRACE_RX_PACKET_RECORD,
					      0, QDF_RX));

		mac_addr = (struct qdf_mac_addr *)(skb->data +
						   QDF_MAC_ADDR_SIZE);

		ucfg_tdls_update_rx_pkt_cnt(adapter->hdd_vdev, mac_addr);

		skb->dev = adapter->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
		++adapter->stats.rx_packets;
		adapter->stats.rx_bytes += skb->len;

		/* Incr GW Rx count for NUD tracking based on GW mac addr */
		hdd_nud_incr_gw_rx_pkt_cnt(adapter, mac_addr);

		/* Check & drop replayed mcast packets (for IPV6) */
		if (hdd_ctx->config->multicast_replay_filter &&
		    hdd_is_mcast_replay(skb)) {
			++adapter->hdd_stats.tx_rx_stats.rx_dropped[cpu_index];
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
				  "%s: Dropping multicast replay pkt", __func__);
			qdf_nbuf_free(skb);
			continue;
		}

		/* hold configurable wakelock for unicast traffic */
		if (hdd_ctx->config->rx_wakelock_timeout &&
		    sta_ctx->conn_info.uIsAuthenticated)
			wake_lock = hdd_is_rx_wake_lock_needed(skb);

		if (wake_lock) {
			cds_host_diag_log_work(&hdd_ctx->rx_wake_lock,
					hdd_ctx->config->rx_wakelock_timeout,
					WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
			qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock,
					hdd_ctx->config->
						rx_wakelock_timeout);
		}

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(skb);

		hdd_tsf_timestamp_rx(hdd_ctx, skb, ktime_to_us(skb->tstamp));

		if (hdd_can_handle_receive_offload(hdd_ctx, skb) &&
		    hdd_ctx->receive_offload_cb)
			rx_ol_status = hdd_ctx->receive_offload_cb(adapter,
								   skb);

		if (rx_ol_status != QDF_STATUS_SUCCESS) {
			if (hdd_napi_enabled(HDD_NAPI_ANY) &&
			    !hdd_ctx->enable_rxthread &&
			    !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
				rxstat = netif_receive_skb(skb);
			else
				rxstat = netif_rx_ni(skb);
		}

		if (!rxstat) {
			++adapter->hdd_stats.tx_rx_stats.
						rx_delivered[cpu_index];
			if (track_arp)
				++adapter->hdd_stats.hdd_arp_stats.
							rx_delivered;
			/* track connectivity stats */
			if (adapter->pkt_type_bitmap)
				hdd_tx_rx_collect_connectivity_stats_info(
					skb, adapter,
					PKT_TYPE_RX_DELIVERED, &pkt_type);
		} else {
			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
			if (track_arp)
				++adapter->hdd_stats.hdd_arp_stats.rx_refused;

			/* track connectivity stats */
			if (adapter->pkt_type_bitmap)
				hdd_tx_rx_collect_connectivity_stats_info(
					skb, adapter,
					PKT_TYPE_RX_REFUSED, &pkt_type);
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * hdd_reason_type_to_string() - return string conversion of reason type
 * @reason: reason type
 *
 * This utility function helps log string conversion of reason type.
 *
 * Return: string conversion of reason type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_reason_type_to_string(enum netif_reason_type reason)
{
	switch (reason) {
	CASE_RETURN_STRING(WLAN_CONTROL_PATH);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL);
	CASE_RETURN_STRING(WLAN_FW_PAUSE);
	CASE_RETURN_STRING(WLAN_TX_ABORT);
	CASE_RETURN_STRING(WLAN_VDEV_STOP);
	CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
	CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL_PRIORITY);
	default:
		return "Invalid";
	}
}
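
/*
 * For reference: CASE_RETURN_STRING is assumed to follow the usual QDF
 * stringize pattern, along the lines of
 *
 *   #define CASE_RETURN_STRING(str) case ((str)): return #str;
 *
 * (a sketch of the convention, not the verified definition), so each
 * case arm returns the literal enumerator name for logging.
 */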

/**
 * hdd_action_type_to_string() - return string conversion of action type
 * @action: action type
 *
 * This utility function helps log string conversion of action type.
 *
 * Return: string conversion of action type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_action_type_to_string(enum netif_action_type action)
{
	switch (action) {
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_WAKE_NON_PRIORITY_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_NON_PRIORITY_QUEUE);
	default:
		return "Invalid";
	}
}

/**
 * wlan_hdd_update_queue_oper_stats - update queue operation statistics
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * Return: none
 */
static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	switch (action) {
	case WLAN_STOP_ALL_NETIF_QUEUE:
	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
	case WLAN_STOP_NON_PRIORITY_QUEUE:
		adapter->queue_oper_stats[reason].pause_count++;
		break;
	case WLAN_START_ALL_NETIF_QUEUE:
	case WLAN_WAKE_ALL_NETIF_QUEUE:
	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
	case WLAN_NETIF_PRIORITY_QUEUE_ON:
	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		adapter->queue_oper_stats[reason].unpause_count++;
		break;
	default:
		break;
	}
}

/**
 * hdd_netdev_queue_is_locked() - check whether a tx queue's xmit lock is held
 * @txq: net device tx queue
 *
 * On an SMP system, always return false and rely safely on
 * __netif_tx_trylock(). On a UP system, report whether some context
 * currently owns the xmit lock.
 *
 * Return: true locked; false not locked
 */
#ifdef QCA_CONFIG_SMP
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	return false;
}
#else
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	return txq->xmit_lock_owner != -1;
}
#endif

/**
 * wlan_hdd_update_txq_timestamp() - update txq timestamp
 * @dev: net device
 *
 * Return: none
 */
static void wlan_hdd_update_txq_timestamp(struct net_device *dev)
{
	struct netdev_queue *txq;
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		txq = netdev_get_tx_queue(dev, i);

		/*
		 * On a UP system, the kernel triggers a watchdog bite if
		 * spinlock recursion is detected. Unfortunately recursion
		 * is possible when this is called in the dev_queue_xmit()
		 * context, where the stack grabs the lock before calling
		 * the driver's ndo_start_xmit callback.
		 */
		if (!hdd_netdev_queue_is_locked(txq)) {
			if (__netif_tx_trylock(txq)) {
				txq_trans_update(txq);
				__netif_tx_unlock(txq);
			}
		}
	}
}

/**
 * wlan_hdd_update_unpause_time() - update unpause time
 * @adapter: adapter handle
 *
 * Return: none
 */
static void wlan_hdd_update_unpause_time(struct hdd_adapter *adapter)
{
	qdf_time_t curr_time = qdf_system_ticks();

	adapter->total_unpause_time += curr_time - adapter->last_time;
	adapter->last_time = curr_time;
}

/**
 * wlan_hdd_update_pause_time() - update pause time
 * @adapter: adapter handle
 * @temp_map: pause map captured before the current reason bit was cleared
 *
 * Return: none
 */
static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
				       uint32_t temp_map)
{
	qdf_time_t curr_time = qdf_system_ticks();
	uint8_t i;
	qdf_time_t pause_time;

	pause_time = curr_time - adapter->last_time;
	adapter->total_pause_time += pause_time;
	adapter->last_time = curr_time;

	for (i = 0; i < WLAN_REASON_TYPE_MAX; i++) {
		if (temp_map & (1 << i)) {
			adapter->queue_oper_stats[i].total_pause_time +=
								pause_time;
			break;
		}
	}
}
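
/*
 * Design note: when several pause reasons overlap, the loop above credits
 * the whole pause interval to the lowest-numbered reason bit still set in
 * temp_map, which keeps the per-reason totals simple at the cost of some
 * attribution accuracy.
 */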

/**
 * wlan_hdd_stop_non_priority_queue() - stop non priority queues
 * @adapter: adapter handle
 *
 * Return: None
 */
static inline void wlan_hdd_stop_non_priority_queue(struct hdd_adapter *adapter)
{
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
}

/**
 * wlan_hdd_wake_non_priority_queue() - wake non priority queues
 * @adapter: adapter handle
 *
 * Return: None
 */
static inline void wlan_hdd_wake_non_priority_queue(struct hdd_adapter *adapter)
{
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BE);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BK);
}

/**
 * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * This is the single function used for all netif_queue related actions
 * such as start/stop of the network queues and on/off of the carrier.
 *
 * Return: None
 */
void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	uint32_t temp_map;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) ||
	    (!adapter->dev)) {
		hdd_err("adapter is invalid");
		return;
	}

	switch (action) {

	case WLAN_NETIF_CARRIER_ON:
		netif_carrier_on(adapter->dev);
		break;

	case WLAN_NETIF_CARRIER_OFF:
		netif_carrier_off(adapter->dev);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			wlan_hdd_stop_non_priority_queue(adapter);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_wake_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			wlan_hdd_wake_non_priority_queue(adapter);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		netif_carrier_off(adapter->dev);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_carrier_on(adapter->dev);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	default:
		hdd_err("unsupported action %d", action);
	}

	spin_lock_bh(&adapter->pause_map_lock);
	if (adapter->pause_map & (1 << WLAN_PEER_UNAUTHORISED))
		wlan_hdd_process_peer_unauthorised_pause(adapter);
	spin_unlock_bh(&adapter->pause_map_lock);

	wlan_hdd_update_queue_oper_stats(adapter, action, reason);

	adapter->queue_oper_history[adapter->history_index].time =
							qdf_system_ticks();
	adapter->queue_oper_history[adapter->history_index].netif_action =
							action;
	adapter->queue_oper_history[adapter->history_index].netif_reason =
							reason;
	adapter->queue_oper_history[adapter->history_index].pause_map =
							adapter->pause_map;
	if (++adapter->history_index == WLAN_HDD_MAX_HISTORY_ENTRY)
		adapter->history_index = 0;
}
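
/*
 * Usage sketch (hypothetical call sequence): each reason contributes one
 * bit to adapter->pause_map, and the queues restart only once every
 * reason has been cleared. For example:
 *
 *   wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
 *                                WLAN_DATA_FLOW_CONTROL);
 *   wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
 *                                WLAN_FW_PAUSE);
 *   pause_map now holds both reason bits.
 *
 *   wlan_hdd_netif_queue_control(adapter, WLAN_START_ALL_NETIF_QUEUE,
 *                                WLAN_DATA_FLOW_CONTROL);
 *   Queues stay stopped: the WLAN_FW_PAUSE bit is still set.
 *
 *   wlan_hdd_netif_queue_control(adapter, WLAN_START_ALL_NETIF_QUEUE,
 *                                WLAN_FW_PAUSE);
 *   pause_map is now 0, so netif_tx_start_all_queues() runs.
 */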

#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * hdd_set_mon_rx_cb() - Set Monitor mode Rx callback
 * @dev: Pointer to net_device structure
 *
 * Return: 0 for success; non-zero for failure
 */
int hdd_set_mon_rx_cb(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	int ret;
	QDF_STATUS qdf_status;
	struct ol_txrx_desc_type sta_desc = {0};
	struct ol_txrx_ops txrx_ops;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
	txrx_ops.rx.rx = hdd_mon_rx_packet_cbk;
	hdd_monitor_set_rx_monitor_cb(&txrx_ops, hdd_rx_monitor_callback);
	cdp_vdev_register(soc,
			  (struct cdp_vdev *)cdp_get_vdev_from_vdev_id(soc,
			  (struct cdp_pdev *)pdev, adapter->session_id),
			  adapter, &txrx_ops);
	/* peer is created wma_vdev_attach->wma_create_peer */
	qdf_status = cdp_peer_register(soc,
				       (struct cdp_pdev *)pdev, &sta_desc);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		hdd_err("cdp_peer_register() failed to register. Status= %d [0x%08X]",
			qdf_status, qdf_status);
		goto exit;
	}

	qdf_status = sme_create_mon_session(hdd_ctx->hHal,
					    adapter->mac_addr.bytes);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		hdd_err("sme_create_mon_session() failed to register. Status= %d [0x%08X]",
			qdf_status, qdf_status);
	}
exit:
	ret = qdf_status_to_os_return(qdf_status);
	return ret;
}
#endif

/**
 * hdd_send_rps_ind() - send rps indication to daemon
 * @adapter: adapter context
 *
 * If the RPS feature is enabled by INI, send an RPS enable indication to
 * the daemon. The indication carries the interface name so the daemon
 * can find the correct sysfs node; it should be sent for all available
 * interfaces.
 *
 * Return: none
 */
void hdd_send_rps_ind(struct hdd_adapter *adapter)
{
	int i;
	uint8_t cpu_map_list_len = 0;
	struct hdd_context *hdd_ctxt = NULL;
	struct wlan_rps_data rps_data;
	struct cds_config_info *cds_cfg;

	cds_cfg = cds_get_ini_config();

	if (!adapter) {
		hdd_err("adapter is NULL");
		return;
	}

	if (!cds_cfg) {
		hdd_err("cds_cfg is NULL");
		return;
	}

	hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
	rps_data.num_queues = NUM_TX_QUEUES;

	hdd_info("cpu_map_list '%s'", hdd_ctxt->config->cpu_map_list);

	/* in case no cpu map list is provided, simply return */
	if (!strlen(hdd_ctxt->config->cpu_map_list)) {
		hdd_err("no cpu map list found");
		goto err;
	}

	if (QDF_STATUS_SUCCESS !=
	    hdd_hex_string_to_u16_array(hdd_ctxt->config->cpu_map_list,
					rps_data.cpu_map_list,
					&cpu_map_list_len,
					WLAN_SVC_IFACE_NUM_QUEUES)) {
		hdd_err("invalid cpu map list");
		goto err;
	}

	rps_data.num_queues =
		(cpu_map_list_len < rps_data.num_queues) ?
		cpu_map_list_len : rps_data.num_queues;

	for (i = 0; i < rps_data.num_queues; i++) {
		hdd_info("cpu_map_list[%d] = 0x%x",
			 i, rps_data.cpu_map_list[i]);
	}

	strlcpy(rps_data.ifname, adapter->dev->name,
		sizeof(rps_data.ifname));
	wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
				    WLAN_SVC_RPS_ENABLE_IND,
				    &rps_data, sizeof(rps_data));

	cds_cfg->rps_enabled = true;

	return;

err:
	hdd_err("Wrong RPS configuration. enabling rx_thread");
	cds_cfg->rps_enabled = false;
}
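
/*
 * Illustrative INI fragment (the key name and values are assumptions for
 * illustration only): cpu_map_list is parsed by
 * hdd_hex_string_to_u16_array(), so it is expected to be a string of hex
 * values, one per queue, e.g.
 *
 *   gRPSCPUMap=0xe 0xe 0xe 0xe 0xe
 *
 * where each value is a CPU bitmask intended for that queue's rps_cpus
 * sysfs node.
 */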

/**
 * hdd_send_rps_disable_ind() - send rps disable indication to daemon
 * @adapter: adapter context
 *
 * Return: none
 */
void hdd_send_rps_disable_ind(struct hdd_adapter *adapter)
{
	struct hdd_context *hdd_ctxt = NULL;
	struct wlan_rps_data rps_data;
	struct cds_config_info *cds_cfg;

	cds_cfg = cds_get_ini_config();

	if (!adapter) {
		hdd_err("adapter is NULL");
		return;
	}

	if (!cds_cfg) {
		hdd_err("cds_cfg is NULL");
		return;
	}

	hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
	rps_data.num_queues = NUM_TX_QUEUES;

	hdd_info("Set cpu_map_list 0");

	qdf_mem_zero(&rps_data.cpu_map_list, sizeof(rps_data.cpu_map_list));

	strlcpy(rps_data.ifname, adapter->dev->name, sizeof(rps_data.ifname));
	wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
				    WLAN_SVC_RPS_ENABLE_IND,
				    &rps_data, sizeof(rps_data));

	cds_cfg->rps_enabled = false;
}

void hdd_tx_queue_cb(void *context, uint32_t vdev_id,
		     enum netif_action_type action,
		     enum netif_reason_type reason)
{
	struct hdd_context *hdd_ctx = (struct hdd_context *)context;
	struct hdd_adapter *adapter = NULL;

	/*
	 * Validating the context is not required here. If a driver
	 * unload/SSR is in progress in a different context and has been
	 * scheduled to run while the driver receives a firmware event for
	 * a STA kick-out, it is still good to disable the Tx queue to
	 * stop the influx of traffic.
	 */
	if (hdd_ctx == NULL) {
		hdd_err("Invalid context passed");
		return;
	}

	adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
	if (adapter == NULL) {
		hdd_err("vdev_id %d does not exist with host", vdev_id);
		return;
	}
	hdd_debug("Tx Queue action %d on vdev %d", action, vdev_id);

	wlan_hdd_netif_queue_control(adapter, action, reason);
}

#ifdef MSM_PLATFORM
/**
 * hdd_reset_tcp_delack() - Reset tcp delack value to default
 * @hdd_ctx: Handle to hdd context
 *
 * Function used to reset TCP delack value to its default value
 *
 * Return: None
 */
void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx)
{
	enum wlan_tp_level next_level = WLAN_SVC_TP_LOW;
	struct wlan_rx_tp_data rx_tp_data = {0};

	rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;
	rx_tp_data.level = next_level;
	hdd_ctx->rx_high_ind_cnt = 0;
	wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index, WLAN_SVC_WLAN_TP_IND,
				    &rx_tp_data, sizeof(rx_tp_data));
}
#endif /* MSM_PLATFORM */