blob: 9c19fa1d349c5c27f7e6d5709af004e9bfeadf18 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Varun Reddy Yeturu076eaa82018-01-16 12:16:14 -08002 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080019/**
20 * DOC: wlan_hdd_tx_rx.c
21 *
22 * Linux HDD Tx/RX APIs
23 */
24
Jeff Johnsona0399642016-12-05 12:39:59 -080025/* denote that this file does not allow legacy hddLog */
26#define HDD_DISALLOW_LEGACY_HDDLOG 1
27
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080028#include <wlan_hdd_tx_rx.h>
29#include <wlan_hdd_softap_tx_rx.h>
30#include <wlan_hdd_napi.h>
31#include <linux/netdevice.h>
32#include <linux/skbuff.h>
33#include <linux/etherdevice.h>
Ravi Joshibb8d4512016-08-22 10:14:52 -070034#include <linux/if_ether.h>
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +053035#include <linux/inetdevice.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080036#include <cds_sched.h>
Manjunathappa Prakash779e4862016-09-12 17:00:11 -070037#include <cds_utils.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080038
39#include <wlan_hdd_p2p.h>
40#include <linux/wireless.h>
41#include <net/cfg80211.h>
42#include <net/ieee80211_radiotap.h>
43#include "sap_api.h"
44#include "wlan_hdd_wmm.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080045#include "wlan_hdd_tdls.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080046#include "wlan_hdd_ocb.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080047#include "wlan_hdd_lro.h"
Leo Changfdb45c32016-10-28 11:09:23 -070048#include <cdp_txrx_cmn.h>
49#include <cdp_txrx_peer_ops.h>
50#include <cdp_txrx_flow_ctrl_v2.h>
Deepak Dhamdhere5872c8c2016-06-02 15:51:47 -070051#include "wlan_hdd_nan_datapath.h"
Ravi Joshib89e7f72016-09-07 13:43:15 -070052#include "pld_common.h"
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +053053#include <cdp_txrx_misc.h>
Ravi Joshi106ffe02017-01-18 18:09:05 -080054#include "wlan_hdd_rx_monitor.h"
Zhu Jianmin04392c42017-05-12 16:34:53 +080055#include "wlan_hdd_power.h"
Poddar, Siddarth31797fa2018-01-22 17:24:15 +053056#include "wlan_hdd_cfg80211.h"
Yu Wangceb357b2017-06-01 12:04:18 +080057#include <wlan_hdd_tsf.h>
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -070058#include <net/tcp.h>
59#include "wma_api.h"
Ravi Joshi106ffe02017-01-18 18:09:05 -080060
Alok Kumarb64650c2018-03-23 17:05:11 +053061#include "wlan_hdd_nud_tracking.h"
62
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
 * Mapping Linux AC interpretation to SME AC.
 * Host has 5 tx queues, 4 flow-controlled queues for regular traffic and
 * one non-flow-controlled queue for high priority control traffic(EOPOL, DHCP).
 * The fifth queue is mapped to AC_VO to allow for proper prioritization.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,
	SME_AC_VI,
	SME_AC_BE,
	SME_AC_BK,
	SME_AC_VO,
};

#else
/*
 * Without flow control v2 there are only the 4 regular qdisc queues,
 * mapped 1:1 onto the SME access categories.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,
	SME_AC_VI,
	SME_AC_BE,
	SME_AC_BK,
};

#endif
87
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080088#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
89/**
90 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
91 * @adapter_context: pointer to vdev adapter
92 *
93 * If Blocked OS Q is not resumed during timeout period, to prevent
94 * permanent stall, resume OS Q forcefully.
95 *
96 * Return: None
97 */
98void hdd_tx_resume_timer_expired_handler(void *adapter_context)
99{
Jeff Johnson80486862017-10-02 13:21:29 -0700100 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800101
Jeff Johnson80486862017-10-02 13:21:29 -0700102 if (!adapter) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800103 /* INVALID ARG */
104 return;
105 }
106
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700107 hdd_debug("Enabling queues");
Jeff Johnson80486862017-10-02 13:21:29 -0700108 wlan_hdd_netif_queue_control(adapter, WLAN_WAKE_ALL_NETIF_QUEUE,
Jeff Johnsona0399642016-12-05 12:39:59 -0800109 WLAN_CONTROL_PATH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800110}
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)

/**
 * hdd_tx_resume_false() - handle a TX flow-control event that is not a resume
 * @adapter: pointer to hdd adapter
 * @tx_resume: TX Q resume trigger; this function acts only when it is false
 *
 * Pauses the OS TX queues and, if it is not already running, arms
 * tx_flow_control_timer so the queues are force-resumed later even if no
 * resume event ever arrives. Also updates the TX flow-control statistics.
 *
 * Return: None
 */
static void
hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
{
	/* Nothing to do when the event actually resumes TX */
	if (true == tx_resume)
		return;

	/* Pause TX */
	hdd_debug("Disabling queues");
	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
				     WLAN_DATA_FLOW_CONTROL);

	/* Arm the safety timer only if it is not already running */
	if (QDF_TIMER_STATE_STOPPED ==
	    qdf_mc_timer_get_current_state(&adapter->
					   tx_flow_control_timer)) {
		QDF_STATUS status;

		status = qdf_mc_timer_start(&adapter->tx_flow_control_timer,
			  WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

		if (!QDF_IS_STATUS_SUCCESS(status))
			hdd_err("Failed to start tx_flow_control_timer");
		else
			adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
	}

	adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
	adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
}
#else

/* Stub used when per-vdev TX descriptor pools are not compiled in */
static inline void
hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
{
}
#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800156
/**
 * hdd_skb_orphan() - orphan an skb, or unshare it, before transmit
 * @adapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Decides, from the TX flow-control low watermark and the tx_orphan_enable
 * configuration, whether the skb should be orphaned (detached from its
 * owning socket so the TCP stack keeps feeding the driver) or unshared
 * instead.
 *
 * Return: pointer to the skb to transmit; may differ from @skb when the
 *         buffer had to be unshared
 */
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
					     struct sk_buff *skb)
{
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	int need_orphan = 0;

	if (adapter->tx_flow_low_watermark > 0) {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
		/*
		 * The TCP TX throttling logic is changed a little after
		 * 3.19-rc1 kernel, the TCP sending limit will be smaller,
		 * which will throttle the TCP packets to the host driver.
		 * The TCP UP LINK throughput will drop heavily. In order to
		 * fix this issue, need to orphan the socket buffer asap, which
		 * will call skb's destructor to notify the TCP stack that the
		 * SKB buffer is unowned. And then the TCP stack will pump more
		 * packets to host driver.
		 *
		 * The TX packets might be dropped for UDP case in the iperf
		 * testing. So need to be protected by follow control.
		 */
		need_orphan = 1;
#else
		if (hdd_ctx->config->tx_orphan_enable)
			need_orphan = 1;
#endif
	} else if (hdd_ctx->config->tx_orphan_enable) {
		/* No watermark configured: orphan only TCP packets */
		if (qdf_nbuf_is_ipv4_tcp_pkt(skb) ||
		    qdf_nbuf_is_ipv6_tcp_pkt(skb))
			need_orphan = 1;
	}

	if (need_orphan) {
		skb_orphan(skb);
		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
	} else
		skb = skb_unshare(skb, GFP_ATOMIC);

	return skb;
}
197
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800198/**
199 * hdd_tx_resume_cb() - Resume OS TX Q.
200 * @adapter_context: pointer to vdev apdapter
201 * @tx_resume: TX Q resume trigger
202 *
203 * Q was stopped due to WLAN TX path low resource condition
204 *
205 * Return: None
206 */
207void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
208{
Jeff Johnson80486862017-10-02 13:21:29 -0700209 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
Jeff Johnson40dae4e2017-08-29 14:00:25 -0700210 struct hdd_station_ctx *hdd_sta_ctx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800211
Jeff Johnson80486862017-10-02 13:21:29 -0700212 if (!adapter) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800213 /* INVALID ARG */
214 return;
215 }
216
Jeff Johnson80486862017-10-02 13:21:29 -0700217 hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800218
219 /* Resume TX */
220 if (true == tx_resume) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530221 if (QDF_TIMER_STATE_STOPPED !=
Jeff Johnson80486862017-10-02 13:21:29 -0700222 qdf_mc_timer_get_current_state(&adapter->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800223 tx_flow_control_timer)) {
Jeff Johnson80486862017-10-02 13:21:29 -0700224 qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800225 }
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700226 hdd_debug("Enabling queues");
Jeff Johnson80486862017-10-02 13:21:29 -0700227 wlan_hdd_netif_queue_control(adapter,
Jeff Johnsona0399642016-12-05 12:39:59 -0800228 WLAN_WAKE_ALL_NETIF_QUEUE,
229 WLAN_DATA_FLOW_CONTROL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800230 }
Jeff Johnson80486862017-10-02 13:21:29 -0700231 hdd_tx_resume_false(adapter, tx_resume);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800232}
233
bings284f8be2017-08-11 10:41:30 +0800234bool hdd_tx_flow_control_is_pause(void *adapter_context)
235{
Jeff Johnson80486862017-10-02 13:21:29 -0700236 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
bings284f8be2017-08-11 10:41:30 +0800237
Jeff Johnson80486862017-10-02 13:21:29 -0700238 if ((NULL == adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
bings284f8be2017-08-11 10:41:30 +0800239 /* INVALID ARG */
Jeff Johnson80486862017-10-02 13:21:29 -0700240 hdd_err("invalid adapter %pK", adapter);
bings284f8be2017-08-11 10:41:30 +0800241 return false;
242 }
243
Jeff Johnson80486862017-10-02 13:21:29 -0700244 return adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL);
bings284f8be2017-08-11 10:41:30 +0800245}
246
/**
 * hdd_register_tx_flow_control() - Register TX flow control callbacks
 * @adapter: adapter handle
 * @timer_callback: handler for the force-resume timer
 * @flow_control_fp: DP TX flow control (pause/resume) callback
 * @flow_control_is_pause_fp: DP callback to query the pause state
 *
 * Lazily creates the per-adapter tx_flow_control_timer, then registers
 * the flow-control callbacks with the CDP layer for this session.
 *
 * Return: none
 */
void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
		qdf_mc_timer_callback_t timer_callback,
		ol_txrx_tx_flow_control_fp flow_control_fp,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause_fp)
{
	if (adapter->tx_flow_timer_initialized == false) {
		qdf_mc_timer_init(&adapter->tx_flow_control_timer,
				  QDF_TIMER_TYPE_SW,
				  timer_callback,
				  adapter);
		adapter->tx_flow_timer_initialized = true;
	}
	cdp_fc_register(cds_get_context(QDF_MODULE_ID_SOC),
			adapter->session_id, flow_control_fp, adapter,
			flow_control_is_pause_fp);
}
263
/**
 * hdd_deregister_tx_flow_control() - Deregister TX Flow control
 * @adapter: adapter handle
 *
 * Unhooks the flow-control callbacks from the CDP layer for this session
 * and tears down the force-resume timer if it was created.
 *
 * Return: none
 */
void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
{
	cdp_fc_deregister(cds_get_context(QDF_MODULE_ID_SOC),
			  adapter->session_id);
	/* Destroy the timer only if hdd_register_tx_flow_control made it */
	if (adapter->tx_flow_timer_initialized == true) {
		qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
		qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
		adapter->tx_flow_timer_initialized = false;
	}
}
280
/**
 * hdd_get_tx_resource() - check tx resources and take action
 * @adapter: adapter handle
 * @STAId: station id
 * @timer_value: timeout (ms) for the force-resume timer
 *
 * Queries the DP layer for available TX descriptors against the adapter's
 * low/high watermarks. When resources are short, pauses the OS queues and
 * arms tx_flow_control_timer (if initialized and idle) so the queues are
 * eventually force-resumed.
 *
 * Return: none
 */
void hdd_get_tx_resource(struct hdd_adapter *adapter,
			 uint8_t STAId, uint16_t timer_value)
{
	if (false ==
	    cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC), STAId,
				   adapter->tx_flow_low_watermark,
				   adapter->tx_flow_high_watermark_offset)) {
		hdd_debug("Disabling queues lwm %d hwm offset %d",
			  adapter->tx_flow_low_watermark,
			  adapter->tx_flow_high_watermark_offset);
		wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
					     WLAN_DATA_FLOW_CONTROL);
		/* Start the safety timer only if it exists and is idle */
		if ((adapter->tx_flow_timer_initialized == true) &&
		    (QDF_TIMER_STATE_STOPPED ==
		     qdf_mc_timer_get_current_state(&adapter->
						    tx_flow_control_timer))) {
			qdf_mc_timer_start(&adapter->tx_flow_control_timer,
					   timer_value);
			adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
			adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
			adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
		}
	}
}
313
gbianec670c592016-11-24 11:21:30 +0800314#else
/**
 * hdd_skb_orphan() - skb_unshare a cloned packet, else skb_orphan
 * @adapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Fills in the GSO size, unshares the skb, and — on kernels newer than
 * 3.19, when tx_orphan_enable is set and the buffer was not cloned —
 * orphans the skb so the owning socket is free to queue more packets.
 *
 * Return: pointer to skb structure (the unshared copy when one was made)
 */
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
		struct sk_buff *skb) {

	struct sk_buff *nskb;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
#endif

	hdd_skb_fill_gso_size(adapter->dev, skb);

	nskb = skb_unshare(skb, GFP_ATOMIC);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
	/* nskb == skb means the buffer was not cloned, so it is safe to
	 * orphan the original in place.
	 */
	if (unlikely(hdd_ctx->config->tx_orphan_enable) && (nskb == skb)) {
		/*
		 * For UDP packets we want to orphan the packet to allow the app
		 * to send more packets. The flow would ultimately be controlled
		 * by the limited number of tx descriptors for the vdev.
		 */
		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
		skb_orphan(skb);
	}
#endif
	return nskb;
}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800346#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
347
/**
 * hdd_txrx_get_tx_ack_count() - get the TX acked packet count
 * @adapter: pointer to adapter
 *
 * Return: number of TX packets acknowledged, as reported by the DP layer
 *         for this adapter's session
 */
uint32_t hdd_txrx_get_tx_ack_count(struct hdd_adapter *adapter)
{
	return cdp_get_tx_ack_stats(cds_get_context(QDF_MODULE_ID_SOC),
				    adapter->session_id);
}
353
Nirav Shah5e74bb82016-07-20 16:01:27 +0530354/**
355 * qdf_event_eapol_log() - send event to wlan diag
356 * @skb: skb ptr
357 * @dir: direction
358 * @eapol_key_info: eapol key info
359 *
360 * Return: None
361 */
362void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
363{
364 int16_t eapol_key_info;
365
366 WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);
367
368 if ((dir == QDF_TX &&
369 (QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
370 QDF_NBUF_CB_GET_PACKET_TYPE(skb))))
371 return;
372 else if (!qdf_nbuf_is_ipv4_eapol_pkt(skb))
373 return;
374
375 eapol_key_info = (uint16_t)(*(uint16_t *)
376 (skb->data + EAPOL_KEY_INFO_OFFSET));
377
378 wlan_diag_event.event_sub_type =
379 (dir == QDF_TX ?
380 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
381 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
382 wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
383 (skb->data + EAPOL_PACKET_TYPE_OFFSET));
384 wlan_diag_event.eapol_key_info = eapol_key_info;
385 wlan_diag_event.eapol_rate = 0;
386 qdf_mem_copy(wlan_diag_event.dest_addr,
387 (skb->data + QDF_NBUF_DEST_MAC_OFFSET),
388 sizeof(wlan_diag_event.dest_addr));
389 qdf_mem_copy(wlan_diag_event.src_addr,
390 (skb->data + QDF_NBUF_SRC_MAC_OFFSET),
391 sizeof(wlan_diag_event.src_addr));
392
393 WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
394}
395
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800396
/**
 * wlan_hdd_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Clears skb->cb and records the classification there: whether the
 * destination MAC is broadcast/multicast, and whether the frame is ARP,
 * DHCP, EAPOL, WAPI, ICMP or ICMPv6.
 *
 * Return: none
 */
void wlan_hdd_classify_pkt(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	qdf_mem_set(skb->cb, sizeof(skb->cb), 0);

	/* check destination mac address is broadcast/multicast
	 * (eh points at the frame start, i.e. the destination MAC)
	 */
	if (is_broadcast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
	else if (is_multicast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_MCAST(skb) = true;

	/* Tag the packet type in the nbuf control block */
	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ARP;
	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_DHCP;
	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_WAPI;
	else if (qdf_nbuf_is_icmp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMP;
	else if (qdf_nbuf_is_icmpv6_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMPv6;
}
434
/**
 * wlan_hdd_latency_opt() - latency option for ICMP traffic
 * @adapter: pointer to the adapter structure
 * @skb: pointer to sk buff
 *
 * Function to disable power save for icmp packets: when the frame was
 * classified as ICMP and icmp_disable_ps_val is configured, power save is
 * turned off and the auto-power-save timer is re-armed with that value.
 *
 * Return: None
 */
#ifdef WLAN_ICMP_DISABLE_PS
static inline void
wlan_hdd_latency_opt(struct hdd_adapter *adapter, struct sk_buff *skb)
{
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);

	/* Feature disabled via configuration */
	if (hdd_ctx->config->icmp_disable_ps_val <= 0)
		return;

	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
	    QDF_NBUF_CB_PACKET_TYPE_ICMP) {
		wlan_hdd_set_powersave(adapter, false,
				       hdd_ctx->config->icmp_disable_ps_val);
		sme_ps_enable_auto_ps_timer(WLAN_HDD_GET_HAL_CTX(adapter),
					adapter->session_id,
					hdd_ctx->config->icmp_disable_ps_val);
	}
}
#else
/* Stub when ICMP power-save disabling is not compiled in */
static inline void
wlan_hdd_latency_opt(struct hdd_adapter *adapter, struct sk_buff *skb)
{
}
#endif
468
/**
 * hdd_get_transmit_sta_id() - function to retrieve station id to be used for
 * sending traffic towards a particular destination address. The destination
 * address can be unicast, multicast or broadcast
 *
 * @adapter: Handle to adapter context
 * @skb: packet whose leading bytes hold the destination MAC address
 * @station_id: station id (in/out; updated when a peer or broadcast id
 *              is resolved)
 *
 * Returns: None
 */
static void hdd_get_transmit_sta_id(struct hdd_adapter *adapter,
			struct sk_buff *skb, uint8_t *station_id)
{
	bool mcbc_addr = false;
	QDF_STATUS status;
	struct hdd_station_ctx *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
	struct qdf_mac_addr *dst_addr = NULL;

	/* Destination MAC is the first field of the Ethernet header */
	dst_addr = (struct qdf_mac_addr *)skb->data;
	status = hdd_get_peer_sta_id(sta_ctx, dst_addr, station_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		if (QDF_NBUF_CB_GET_IS_BCAST(skb) ||
		    QDF_NBUF_CB_GET_IS_MCAST(skb)) {
			hdd_debug("Received MC/BC packet for transmission");
			mcbc_addr = true;
		}
	}

	if (adapter->device_mode == QDF_IBSS_MODE ||
	    adapter->device_mode == QDF_NDI_MODE) {
		/*
		 * This check is necessary to make sure station id is not
		 * overwritten for UC traffic in IBSS or NDI mode
		 */
		if (mcbc_addr)
			*station_id = sta_ctx->broadcast_staid;
	} else {
		/* For the rest, traffic is directed to AP/P2P GO */
		if (eConnectionState_Associated == sta_ctx->conn_info.connState)
			*station_id = sta_ctx->conn_info.staId[0];
	}
}
/**
 * hdd_clear_tx_rx_connectivity_stats() - clear connectivity stats
 * @adapter: pointer to the HDD adapter whose tracking state is reset
 *
 * Zeroes the per-adapter ARP/DNS/TCP/ICMPv4 connectivity-check counters
 * and clears all of the associated tracking filters (packet-type bitmap,
 * tracked ARP IP, DNS domain, ports and destination IPv4).
 *
 * Return: None
 */
static void hdd_clear_tx_rx_connectivity_stats(struct hdd_adapter *adapter)
{
	hdd_info("Clear txrx connectivity stats");
	qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
		     sizeof(adapter->hdd_stats.hdd_arp_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_dns_stats,
		     sizeof(adapter->hdd_stats.hdd_dns_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats,
		     sizeof(adapter->hdd_stats.hdd_tcp_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_icmpv4_stats,
		     sizeof(adapter->hdd_stats.hdd_icmpv4_stats));
	adapter->pkt_type_bitmap = 0;
	adapter->track_arp_ip = 0;
	/* Wipe only the portion of the DNS payload that was in use */
	qdf_mem_zero(adapter->dns_payload, adapter->track_dns_domain_len);
	adapter->track_dns_domain_len = 0;
	adapter->track_src_port = 0;
	adapter->track_dest_port = 0;
	adapter->track_dest_ipv4 = 0;
}
538
539void hdd_reset_all_adapters_connectivity_stats(struct hdd_context *hdd_ctx)
540{
541 struct hdd_adapter *adapter = NULL, *pNext = NULL;
542 QDF_STATUS status;
543
544 hdd_enter();
545
546 status = hdd_get_front_adapter(hdd_ctx, &adapter);
547
548 while (NULL != adapter && QDF_STATUS_SUCCESS == status) {
549 hdd_clear_tx_rx_connectivity_stats(adapter);
550 status = hdd_get_next_adapter(hdd_ctx, adapter, &pNext);
551 adapter = pNext;
552 }
553
554 hdd_exit();
555}
556
/**
 * hdd_is_tx_allowed() - check if Tx is allowed based on current peer state
 * @skb: pointer to OS packet (sk_buff)
 * @peer_id: Peer STA ID in peer table
 *
 * This function gets the peer state from DP and check if it is either
 * in OL_TXRX_PEER_STATE_CONN or OL_TXRX_PEER_STATE_AUTH. Only EAP packets
 * (and WAI packets for WAPI) are allowed when peer_state is
 * OL_TXRX_PEER_STATE_CONN. All packets are allowed when peer_state is
 * OL_TXRX_PEER_STATE_AUTH.
 *
 * Return: true if Tx is allowed and false otherwise.
 */
static inline bool hdd_is_tx_allowed(struct sk_buff *skb, uint8_t peer_id)
{
	enum ol_txrx_peer_state peer_state;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	void *peer;

	QDF_BUG(soc);
	QDF_BUG(pdev);

	peer = cdp_peer_find_by_local_id(soc, pdev, peer_id);

	if (peer == NULL) {
		hdd_err_rl("Unable to find peer entry for staid: %d", peer_id);
		return false;
	}

	peer_state = cdp_peer_state_get(soc, peer);
	/* Fully authorized peer: everything may be transmitted */
	if (likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
		return true;
	/* Connected but not yet authorized: only 802.1X/WAI handshake
	 * frames are allowed through.
	 */
	if (OL_TXRX_PEER_STATE_CONN == peer_state &&
		(ntohs(skb->protocol) == HDD_ETHERTYPE_802_1_X
		|| IS_HDD_ETHERTYPE_WAI(skb)))
		return true;
	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("Invalid peer state for Tx: %d"), peer_state);
	return false;
}
jitiphilfb410612018-03-26 22:37:56 +0530597
Poddar, Siddarth31797fa2018-01-22 17:24:15 +0530598/**
599 * hdd_tx_rx_is_dns_domain_name_match() - function to check whether dns
600 * domain name in the received skb matches with the tracking dns domain
601 * name or not
602 *
603 * @skb: pointer to skb
604 * @adapter: pointer to adapter
605 *
606 * Returns: true if matches else false
607 */
608static bool hdd_tx_rx_is_dns_domain_name_match(struct sk_buff *skb,
609 struct hdd_adapter *adapter)
610{
611 uint8_t *domain_name;
612
613 if (adapter->track_dns_domain_len == 0)
614 return false;
615
616 domain_name = qdf_nbuf_get_dns_domain_name(skb,
617 adapter->track_dns_domain_len);
618 if (strncmp(domain_name, adapter->dns_payload,
619 adapter->track_dns_domain_len) == 0)
620 return true;
621 else
622 return false;
623}
624
625void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
626 void *context,
627 enum connectivity_stats_pkt_status action,
628 uint8_t *pkt_type)
629{
630 uint32_t pkt_type_bitmap;
631 struct hdd_adapter *adapter = NULL;
632
633 adapter = (struct hdd_adapter *)context;
634 if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
635 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
636 "Magic cookie(%x) for adapter sanity verification is invalid",
637 adapter->magic);
638 return;
639 }
640
641 /* ARP tracking is done already. */
642 pkt_type_bitmap = adapter->pkt_type_bitmap;
643 pkt_type_bitmap &= ~CONNECTIVITY_CHECK_SET_ARP;
644
645 if (!pkt_type_bitmap)
646 return;
647
648 switch (action) {
649 case PKT_TYPE_REQ:
650 case PKT_TYPE_TX_HOST_FW_SENT:
651 if (qdf_nbuf_is_icmp_pkt(skb)) {
652 if (qdf_nbuf_data_is_icmpv4_req(skb) &&
653 (adapter->track_dest_ipv4 ==
654 qdf_nbuf_get_icmpv4_tgt_ip(skb))) {
655 *pkt_type = CONNECTIVITY_CHECK_SET_ICMPV4;
656 if (action == PKT_TYPE_REQ) {
657 ++adapter->hdd_stats.hdd_icmpv4_stats.
658 tx_icmpv4_req_count;
659 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
660 QDF_TRACE_LEVEL_INFO_HIGH,
661 "%s : ICMPv4 Req packet",
662 __func__);
663 } else
664 /* host receives tx completion */
665 ++adapter->hdd_stats.hdd_icmpv4_stats.
666 tx_host_fw_sent;
667 }
668 } else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
669 if (qdf_nbuf_data_is_tcp_syn(skb) &&
670 (adapter->track_dest_port ==
671 qdf_nbuf_data_get_tcp_dst_port(skb))) {
672 *pkt_type = CONNECTIVITY_CHECK_SET_TCP_SYN;
673 if (action == PKT_TYPE_REQ) {
674 ++adapter->hdd_stats.hdd_tcp_stats.
675 tx_tcp_syn_count;
676 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
677 QDF_TRACE_LEVEL_INFO_HIGH,
678 "%s : TCP Syn packet",
679 __func__);
680 } else
681 /* host receives tx completion */
682 ++adapter->hdd_stats.hdd_tcp_stats.
683 tx_tcp_syn_host_fw_sent;
684 } else if ((adapter->hdd_stats.hdd_tcp_stats.
685 is_tcp_syn_ack_rcv || adapter->hdd_stats.
686 hdd_tcp_stats.is_tcp_ack_sent) &&
687 qdf_nbuf_data_is_tcp_ack(skb) &&
688 (adapter->track_dest_port ==
689 qdf_nbuf_data_get_tcp_dst_port(skb))) {
690 *pkt_type = CONNECTIVITY_CHECK_SET_TCP_ACK;
691 if (action == PKT_TYPE_REQ &&
692 adapter->hdd_stats.hdd_tcp_stats.
693 is_tcp_syn_ack_rcv) {
694 ++adapter->hdd_stats.hdd_tcp_stats.
695 tx_tcp_ack_count;
696 adapter->hdd_stats.hdd_tcp_stats.
697 is_tcp_syn_ack_rcv = false;
698 adapter->hdd_stats.hdd_tcp_stats.
699 is_tcp_ack_sent = true;
700 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
701 QDF_TRACE_LEVEL_INFO_HIGH,
702 "%s : TCP Ack packet",
703 __func__);
704 } else if (action == PKT_TYPE_TX_HOST_FW_SENT &&
705 adapter->hdd_stats.hdd_tcp_stats.
706 is_tcp_ack_sent) {
707 /* host receives tx completion */
708 ++adapter->hdd_stats.hdd_tcp_stats.
709 tx_tcp_ack_host_fw_sent;
710 adapter->hdd_stats.hdd_tcp_stats.
711 is_tcp_ack_sent = false;
712 }
713 }
714 } else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
715 if (qdf_nbuf_data_is_dns_query(skb) &&
716 hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
717 *pkt_type = CONNECTIVITY_CHECK_SET_DNS;
718 if (action == PKT_TYPE_REQ) {
719 ++adapter->hdd_stats.hdd_dns_stats.
720 tx_dns_req_count;
721 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
722 QDF_TRACE_LEVEL_INFO_HIGH,
723 "%s : DNS query packet",
724 __func__);
725 } else
726 /* host receives tx completion */
727 ++adapter->hdd_stats.hdd_dns_stats.
728 tx_host_fw_sent;
729 }
730 }
731 break;
732
733 case PKT_TYPE_RSP:
734 if (qdf_nbuf_is_icmp_pkt(skb)) {
735 if (qdf_nbuf_data_is_icmpv4_rsp(skb) &&
736 (adapter->track_dest_ipv4 ==
737 qdf_nbuf_get_icmpv4_src_ip(skb))) {
738 ++adapter->hdd_stats.hdd_icmpv4_stats.
739 rx_icmpv4_rsp_count;
740 *pkt_type =
741 CONNECTIVITY_CHECK_SET_ICMPV4;
742 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
743 QDF_TRACE_LEVEL_INFO_HIGH,
744 "%s : ICMPv4 Res packet", __func__);
745 }
746 } else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
747 if (qdf_nbuf_data_is_tcp_syn_ack(skb) &&
748 (adapter->track_dest_port ==
749 qdf_nbuf_data_get_tcp_src_port(skb))) {
750 ++adapter->hdd_stats.hdd_tcp_stats.
751 rx_tcp_syn_ack_count;
752 adapter->hdd_stats.hdd_tcp_stats.
753 is_tcp_syn_ack_rcv = true;
754 *pkt_type =
755 CONNECTIVITY_CHECK_SET_TCP_SYN_ACK;
756 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
757 QDF_TRACE_LEVEL_INFO_HIGH,
758 "%s : TCP Syn ack packet", __func__);
759 }
760 } else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
761 if (qdf_nbuf_data_is_dns_response(skb) &&
762 hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
763 ++adapter->hdd_stats.hdd_dns_stats.
764 rx_dns_rsp_count;
765 *pkt_type = CONNECTIVITY_CHECK_SET_DNS;
766 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
767 QDF_TRACE_LEVEL_INFO_HIGH,
768 "%s : DNS response packet", __func__);
769 }
770 }
771 break;
772
773 case PKT_TYPE_TX_DROPPED:
774 switch (*pkt_type) {
775 case CONNECTIVITY_CHECK_SET_ICMPV4:
776 ++adapter->hdd_stats.hdd_icmpv4_stats.tx_dropped;
777 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
778 QDF_TRACE_LEVEL_INFO_HIGH,
779 "%s : ICMPv4 Req packet dropped", __func__);
780 break;
781 case CONNECTIVITY_CHECK_SET_TCP_SYN:
782 ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_dropped;
783 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
784 QDF_TRACE_LEVEL_INFO_HIGH,
785 "%s : TCP syn packet dropped", __func__);
786 break;
787 case CONNECTIVITY_CHECK_SET_TCP_ACK:
788 ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_dropped;
789 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
790 QDF_TRACE_LEVEL_INFO_HIGH,
791 "%s : TCP ack packet dropped", __func__);
792 break;
793 case CONNECTIVITY_CHECK_SET_DNS:
794 ++adapter->hdd_stats.hdd_dns_stats.tx_dropped;
795 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
796 QDF_TRACE_LEVEL_INFO_HIGH,
797 "%s : DNS query packet dropped", __func__);
798 break;
799 default:
800 break;
801 }
802 break;
803 case PKT_TYPE_RX_DELIVERED:
804 switch (*pkt_type) {
805 case CONNECTIVITY_CHECK_SET_ICMPV4:
806 ++adapter->hdd_stats.hdd_icmpv4_stats.rx_delivered;
807 break;
808 case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
809 ++adapter->hdd_stats.hdd_tcp_stats.rx_delivered;
810 break;
811 case CONNECTIVITY_CHECK_SET_DNS:
812 ++adapter->hdd_stats.hdd_dns_stats.rx_delivered;
813 break;
814 default:
815 break;
816 }
817 break;
818 case PKT_TYPE_RX_REFUSED:
819 switch (*pkt_type) {
820 case CONNECTIVITY_CHECK_SET_ICMPV4:
821 ++adapter->hdd_stats.hdd_icmpv4_stats.rx_refused;
822 break;
823 case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
824 ++adapter->hdd_stats.hdd_tcp_stats.rx_refused;
825 break;
826 case CONNECTIVITY_CHECK_SET_DNS:
827 ++adapter->hdd_stats.hdd_dns_stats.rx_refused;
828 break;
829 default:
830 break;
831 }
832 break;
833 case PKT_TYPE_TX_ACK_CNT:
834 switch (*pkt_type) {
835 case CONNECTIVITY_CHECK_SET_ICMPV4:
836 ++adapter->hdd_stats.hdd_icmpv4_stats.tx_ack_cnt;
837 break;
838 case CONNECTIVITY_CHECK_SET_TCP_SYN:
839 ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_ack_cnt;
840 break;
841 case CONNECTIVITY_CHECK_SET_TCP_ACK:
842 ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_ack_cnt;
843 break;
844 case CONNECTIVITY_CHECK_SET_DNS:
845 ++adapter->hdd_stats.hdd_dns_stats.tx_ack_cnt;
846 break;
847 default:
848 break;
849 }
850 break;
851 default:
852 break;
853 }
854}
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800855
856/**
Mukul Sharmac4de4ef2016-09-12 15:39:00 +0530857 * __hdd_hard_start_xmit() - Transmit a frame
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800858 * @skb: pointer to OS packet (sk_buff)
859 * @dev: pointer to network device
860 *
861 * Function registered with the Linux OS for transmitting
862 * packets. This version of the function directly passes
863 * the packet to Transport Layer.
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530864 * In case of any packet drop or error, log the error with
865 * INFO HIGH/LOW/MEDIUM to avoid excessive logging in kmsg.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800866 *
867 * Return: Always returns NETDEV_TX_OK
868 */
Srinivas Girigowda49b48b22018-04-05 09:23:28 -0700869static netdev_tx_t __hdd_hard_start_xmit(struct sk_buff *skb,
870 struct net_device *dev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800871{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530872 QDF_STATUS status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800873 sme_ac_enum_type ac;
Abhishek Singh12be60f2017-08-11 13:52:42 +0530874 enum sme_qos_wmmuptype up;
Jeff Johnson80486862017-10-02 13:21:29 -0700875 struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800876 bool granted;
Nirav Shah5e74bb82016-07-20 16:01:27 +0530877 uint8_t STAId;
Jeff Johnsonb9424862017-10-30 08:49:35 -0700878 struct hdd_station_ctx *sta_ctx = &adapter->session.station;
Kabilan Kannan1c1c4022017-04-06 22:49:26 -0700879 struct qdf_mac_addr *mac_addr;
Mohit Khannaf8f96822017-05-17 17:11:59 -0700880 bool pkt_proto_logged = false;
Poddar, Siddarth31797fa2018-01-22 17:24:15 +0530881 uint8_t pkt_type = 0;
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700882#ifdef QCA_PKT_PROTO_TRACE
883 uint8_t proto_type = 0;
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +0530884#endif
Jeff Johnson80486862017-10-02 13:21:29 -0700885 struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +0530886 bool is_arp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800887
888#ifdef QCA_WIFI_FTM
Anurag Chouhan6d760662016-02-20 16:05:43 +0530889 if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800890 kfree_skb(skb);
891 return NETDEV_TX_OK;
892 }
893#endif
894
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700895 ++adapter->hdd_stats.tx_rx_stats.tx_called;
896 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +0530897
Will Huang20de9432018-02-06 17:01:03 +0800898 if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
899 cds_is_load_or_unload_in_progress()) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530900 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Will Huang20de9432018-02-06 17:01:03 +0800901 "Recovery/(Un)load in progress, dropping the packet");
Nirav Shahdf3659e2016-06-27 12:26:28 +0530902 goto drop_pkt;
Govind Singhede435f2015-12-01 16:16:36 +0530903 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800904
Nirav Shah5e74bb82016-07-20 16:01:27 +0530905 wlan_hdd_classify_pkt(skb);
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +0530906 if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) == QDF_NBUF_CB_PACKET_TYPE_ARP) {
907 is_arp = true;
908 if (qdf_nbuf_data_is_arp_req(skb) &&
909 (hdd_ctx->track_arp_ip == qdf_nbuf_get_arp_tgt_ip(skb))) {
910 ++adapter->hdd_stats.hdd_arp_stats.tx_arp_req_count;
911 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
912 QDF_TRACE_LEVEL_INFO_HIGH,
913 "%s : ARP packet", __func__);
914 }
915 }
Poddar, Siddarth31797fa2018-01-22 17:24:15 +0530916 /* track connectivity stats */
917 if (adapter->pkt_type_bitmap)
918 hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
919 PKT_TYPE_REQ, &pkt_type);
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +0530920
921 if (cds_is_driver_recovering()) {
922 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_WARN,
923 "Recovery in progress, dropping the packet");
924 goto drop_pkt;
925 }
Nirav Shah5e74bb82016-07-20 16:01:27 +0530926
Ravi Joshi24477b72016-07-19 15:45:09 -0700927 STAId = HDD_WLAN_INVALID_STA_ID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800928
Jeff Johnson80486862017-10-02 13:21:29 -0700929 hdd_get_transmit_sta_id(adapter, skb, &STAId);
Naveen Rawat209d0932016-08-03 15:07:23 -0700930 if (STAId >= WLAN_MAX_STA_COUNT) {
hqu5e6b9862017-12-21 18:48:46 +0800931 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsona0399642016-12-05 12:39:59 -0800932 "Invalid station id, transmit operation suspended");
Ravi Joshi24477b72016-07-19 15:45:09 -0700933 goto drop_pkt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800934 }
935
Jeff Johnson80486862017-10-02 13:21:29 -0700936 hdd_get_tx_resource(adapter, STAId,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800937 WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);
938
939 /* Get TL AC corresponding to Qdisc queue index/AC. */
940 ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];
941
Nirav Shahcbc6d722016-03-01 16:24:53 +0530942 if (!qdf_nbuf_ipa_owned_get(skb)) {
Jeff Johnson80486862017-10-02 13:21:29 -0700943 skb = hdd_skb_orphan(adapter, skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800944 if (!skb)
Nirav Shahdf3659e2016-06-27 12:26:28 +0530945 goto drop_pkt_accounting;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800946 }
947
Ravi Joshi24477b72016-07-19 15:45:09 -0700948 /*
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530949 * Add SKB to internal tracking table before further processing
950 * in WLAN driver.
951 */
952 qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__);
953
954 /*
Ravi Joshi24477b72016-07-19 15:45:09 -0700955 * user priority from IP header, which is already extracted and set from
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800956 * select_queue call back function
957 */
958 up = skb->priority;
959
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700960 ++adapter->hdd_stats.tx_rx_stats.tx_classified_ac[ac];
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800961#ifdef HDD_WMM_DEBUG
Srinivas Girigowda028c4482017-03-09 18:52:02 -0800962 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800963 "%s: Classified as ac %d up %d", __func__, ac, up);
964#endif /* HDD_WMM_DEBUG */
965
Jeff Johnson137c8ee2017-10-28 13:06:48 -0700966 if (HDD_PSB_CHANGED == adapter->psb_changed) {
Ravi Joshi24477b72016-07-19 15:45:09 -0700967 /*
968 * Function which will determine acquire admittance for a
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800969 * WMM AC is required or not based on psb configuration done
970 * in the framework
971 */
Jeff Johnson80486862017-10-02 13:21:29 -0700972 hdd_wmm_acquire_access_required(adapter, ac);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800973 }
974 /*
975 * Make sure we already have access to this access category
976 * or it is EAPOL or WAPI frame during initial authentication which
977 * can have artifically boosted higher qos priority.
978 */
979
Jeff Johnson137c8ee2017-10-28 13:06:48 -0700980 if (((adapter->psb_changed & (1 << ac)) &&
Jeff Johnson02d14ce2017-10-31 09:08:30 -0700981 likely(adapter->hdd_wmm_status.wmmAcStatus[ac].
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800982 wmmAcAccessAllowed)) ||
Jeff Johnsond377dce2017-10-04 10:32:42 -0700983 ((sta_ctx->conn_info.uIsAuthenticated == false) &&
Nirav Shah5e74bb82016-07-20 16:01:27 +0530984 (QDF_NBUF_CB_PACKET_TYPE_EAPOL ==
985 QDF_NBUF_CB_GET_PACKET_TYPE(skb) ||
986 QDF_NBUF_CB_PACKET_TYPE_WAPI ==
987 QDF_NBUF_CB_GET_PACKET_TYPE(skb)))) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800988 granted = true;
989 } else {
Jeff Johnson80486862017-10-02 13:21:29 -0700990 status = hdd_wmm_acquire_access(adapter, ac, &granted);
Jeff Johnson137c8ee2017-10-28 13:06:48 -0700991 adapter->psb_changed |= (1 << ac);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800992 }
993
994 if (!granted) {
995 bool isDefaultAc = false;
Ravi Joshi24477b72016-07-19 15:45:09 -0700996 /*
997 * ADDTS request for this AC is sent, for now
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800998 * send this packet through next avaiable lower
999 * Access category until ADDTS negotiation completes.
1000 */
1001 while (!likely
Jeff Johnson02d14ce2017-10-31 09:08:30 -07001002 (adapter->hdd_wmm_status.wmmAcStatus[ac].
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001003 wmmAcAccessAllowed)) {
1004 switch (ac) {
1005 case SME_AC_VO:
1006 ac = SME_AC_VI;
1007 up = SME_QOS_WMM_UP_VI;
1008 break;
1009 case SME_AC_VI:
1010 ac = SME_AC_BE;
1011 up = SME_QOS_WMM_UP_BE;
1012 break;
1013 case SME_AC_BE:
1014 ac = SME_AC_BK;
1015 up = SME_QOS_WMM_UP_BK;
1016 break;
1017 default:
1018 ac = SME_AC_BK;
1019 up = SME_QOS_WMM_UP_BK;
1020 isDefaultAc = true;
1021 break;
1022 }
1023 if (isDefaultAc)
1024 break;
1025 }
1026 skb->priority = up;
1027 skb->queue_mapping = hdd_linux_up_to_ac_map[up];
1028 }
1029
Kabilan Kannan36090ce2016-05-03 19:28:44 -07001030#ifdef QCA_PKT_PROTO_TRACE
1031 if ((hdd_ctx->config->gEnableDebugLog & CDS_PKT_TRAC_TYPE_EAPOL) ||
1032 (hdd_ctx->config->gEnableDebugLog & CDS_PKT_TRAC_TYPE_DHCP)) {
1033 proto_type = cds_pkt_get_proto_type(skb,
1034 hdd_ctx->config->gEnableDebugLog,
1035 0);
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -07001036 if (CDS_PKT_TRAC_TYPE_EAPOL & proto_type)
Kabilan Kannan36090ce2016-05-03 19:28:44 -07001037 cds_pkt_trace_buf_update("ST:T:EPL");
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -07001038 else if (CDS_PKT_TRAC_TYPE_DHCP & proto_type)
Kabilan Kannan36090ce2016-05-03 19:28:44 -07001039 cds_pkt_trace_buf_update("ST:T:DHC");
Kabilan Kannan36090ce2016-05-03 19:28:44 -07001040 }
1041#endif /* QCA_PKT_PROTO_TRACE */
1042
Jeff Johnson80486862017-10-02 13:21:29 -07001043 adapter->stats.tx_bytes += skb->len;
Kabilan Kannan36090ce2016-05-03 19:28:44 -07001044
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07001045 mac_addr = (struct qdf_mac_addr *)skb->data;
1046
Jeff Johnson80486862017-10-02 13:21:29 -07001047 ucfg_tdls_update_tx_pkt_cnt(adapter->hdd_vdev, mac_addr);
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07001048
Mohit Khannab1dd1e82017-02-04 15:14:38 -08001049 if (qdf_nbuf_is_tso(skb))
Jeff Johnson80486862017-10-02 13:21:29 -07001050 adapter->stats.tx_packets += qdf_nbuf_get_tso_num_seg(skb);
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -07001051 else
Jeff Johnson80486862017-10-02 13:21:29 -07001052 ++adapter->stats.tx_packets;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001053
Nirav Shah5e74bb82016-07-20 16:01:27 +05301054 hdd_event_eapol_log(skb, QDF_TX);
Jeff Johnson1b780e42017-10-31 14:11:45 -07001055 pkt_proto_logged = qdf_dp_trace_log_pkt(adapter->session_id,
Mohit Khannaf8f96822017-05-17 17:11:59 -07001056 skb, QDF_TX,
1057 QDF_TRACE_DEFAULT_PDEV_ID);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301058 QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
1059 QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001060
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301061 qdf_dp_trace_set_track(skb, QDF_TX);
Mohit Khannaf8f96822017-05-17 17:11:59 -07001062
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301063 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001064 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(skb),
1065 sizeof(qdf_nbuf_data(skb)),
Himanshu Agarwalee3411a2017-01-31 12:56:47 +05301066 QDF_TX));
Mohit Khannaf8f96822017-05-17 17:11:59 -07001067 if (!pkt_proto_logged) {
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301068 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
Mohit Khannaf8f96822017-05-17 17:11:59 -07001069 QDF_TRACE_DEFAULT_PDEV_ID, (uint8_t *)skb->data,
1070 qdf_nbuf_len(skb), QDF_TX));
1071 if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE) {
1072 DPTRACE(qdf_dp_trace(skb,
1073 QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
1074 QDF_TRACE_DEFAULT_PDEV_ID,
1075 (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
1076 (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
1077 QDF_TX));
1078 }
Nirav Shah07e39a62016-04-25 17:46:40 +05301079 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001080
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -08001081 if (!hdd_is_tx_allowed(skb, STAId)) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +05301082 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -08001083 FL("Tx not allowed for sta_id: %d"), STAId);
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001084 ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +05301085 goto drop_pkt_and_release_skb;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001086 }
1087
jinweic chen51046012018-04-11 16:02:22 +08001088 /* check whether need to linearize skb, like non-linear udp data */
1089 if (hdd_skb_nontso_linearize(skb) != QDF_STATUS_SUCCESS) {
1090 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
1091 QDF_TRACE_LEVEL_INFO_HIGH,
1092 "%s: skb %pK linearize failed. drop the pkt",
1093 __func__, skb);
1094 ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
1095 goto drop_pkt_and_release_skb;
1096 }
1097
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001098 /*
Ravi Joshi24477b72016-07-19 15:45:09 -07001099 * If a transmit function is not registered, drop packet
1100 */
Jeff Johnson80486862017-10-02 13:21:29 -07001101 if (!adapter->tx_fn) {
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001102 QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
1103 "%s: TX function not registered by the data path",
1104 __func__);
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001105 ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +05301106 goto drop_pkt_and_release_skb;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001107 }
1108
Jeff Johnson80486862017-10-02 13:21:29 -07001109 if (adapter->tx_fn(adapter->txrx_vdev,
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001110 (qdf_nbuf_t) skb) != NULL) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +05301111 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Srinivas Girigowda028c4482017-03-09 18:52:02 -08001112 "%s: Failed to send packet to txrx for staid: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001113 __func__, STAId);
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001114 ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +05301115 goto drop_pkt_and_release_skb;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001116 }
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +05301117
Dustin Browne0024fa2016-10-14 16:29:21 -07001118 netif_trans_update(dev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001119
1120 return NETDEV_TX_OK;
1121
Himanshu Agarwal53298d12017-02-20 19:14:17 +05301122drop_pkt_and_release_skb:
1123 qdf_net_buf_debug_release_skb(skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001124drop_pkt:
1125
Nirav Shahdf3659e2016-06-27 12:26:28 +05301126 if (skb) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301127 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001128 QDF_TRACE_DEFAULT_PDEV_ID, (uint8_t *)skb->data,
1129 qdf_nbuf_len(skb), QDF_TX));
Nirav Shahdf3659e2016-06-27 12:26:28 +05301130 if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
1131 DPTRACE(qdf_dp_trace(skb,
1132 QDF_DP_TRACE_DROP_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001133 QDF_TRACE_DEFAULT_PDEV_ID,
Nirav Shahdf3659e2016-06-27 12:26:28 +05301134 (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
1135 (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
1136 QDF_TX));
1137
1138 kfree_skb(skb);
1139 }
1140
1141drop_pkt_accounting:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001142
Jeff Johnson80486862017-10-02 13:21:29 -07001143 ++adapter->stats.tx_dropped;
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001144 ++adapter->hdd_stats.tx_rx_stats.tx_dropped;
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +05301145 if (is_arp) {
1146 ++adapter->hdd_stats.hdd_arp_stats.tx_dropped;
1147 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
1148 "%s : ARP packet dropped", __func__);
1149 }
Nirav Shahdf3659e2016-06-27 12:26:28 +05301150
Poddar, Siddarth31797fa2018-01-22 17:24:15 +05301151 /* track connectivity stats */
1152 if (adapter->pkt_type_bitmap)
1153 hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
1154 PKT_TYPE_TX_DROPPED, &pkt_type);
1155
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001156 return NETDEV_TX_OK;
1157}
1158
1159/**
Mukul Sharmac4de4ef2016-09-12 15:39:00 +05301160 * hdd_hard_start_xmit() - Wrapper function to protect
1161 * __hdd_hard_start_xmit from SSR
1162 * @skb: pointer to OS packet
1163 * @dev: pointer to net_device structure
1164 *
1165 * Function called by OS if any packet needs to transmit.
1166 *
1167 * Return: Always returns NETDEV_TX_OK
1168 */
Srinivas Girigowda49b48b22018-04-05 09:23:28 -07001169netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
Mukul Sharmac4de4ef2016-09-12 15:39:00 +05301170{
Srinivas Girigowda49b48b22018-04-05 09:23:28 -07001171 netdev_tx_t ret;
Mukul Sharmac4de4ef2016-09-12 15:39:00 +05301172
1173 cds_ssr_protect(__func__);
1174 ret = __hdd_hard_start_xmit(skb, dev);
1175 cds_ssr_unprotect(__func__);
1176
1177 return ret;
1178}
1179
1180/**
Deepak Dhamdhere5872c8c2016-06-02 15:51:47 -07001181 * hdd_get_peer_sta_id() - Get the StationID using the Peer Mac address
Jeff Johnsond377dce2017-10-04 10:32:42 -07001182 * @sta_ctx: pointer to HDD Station Context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001183 * @pMacAddress: pointer to Peer Mac address
1184 * @staID: pointer to returned Station Index
1185 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301186 * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_FAILURE
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001187 */
1188
Jeff Johnsond377dce2017-10-04 10:32:42 -07001189QDF_STATUS hdd_get_peer_sta_id(struct hdd_station_ctx *sta_ctx,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301190 struct qdf_mac_addr *pMacAddress, uint8_t *staId)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001191{
1192 uint8_t idx;
1193
Naveen Rawatc45d1622016-07-05 12:20:09 -07001194 for (idx = 0; idx < MAX_PEERS; idx++) {
Jeff Johnsond377dce2017-10-04 10:32:42 -07001195 if (!qdf_mem_cmp(&sta_ctx->conn_info.peerMacAddress[idx],
Anurag Chouhan6d760662016-02-20 16:05:43 +05301196 pMacAddress, QDF_MAC_ADDR_SIZE)) {
Jeff Johnsond377dce2017-10-04 10:32:42 -07001197 *staId = sta_ctx->conn_info.staId[idx];
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301198 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001199 }
1200 }
1201
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301202 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001203}
1204
1205/**
1206 * __hdd_tx_timeout() - TX timeout handler
1207 * @dev: pointer to network device
1208 *
1209 * This function is registered as a netdev ndo_tx_timeout method, and
1210 * is invoked by the kernel if the driver takes too long to transmit a
1211 * frame.
1212 *
1213 * Return: None
1214 */
1215static void __hdd_tx_timeout(struct net_device *dev)
1216{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001217 struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07001218 struct hdd_context *hdd_ctx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001219 struct netdev_queue *txq;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301220 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1221 u64 diff_jiffies;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001222 int i = 0;
1223
Dustin Browne0024fa2016-10-14 16:29:21 -07001224 TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301225 DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_HDD_TX_TIMEOUT,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001226 QDF_TRACE_DEFAULT_PDEV_ID,
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301227 NULL, 0, QDF_TX));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001228
1229 /* Getting here implies we disabled the TX queues for too
1230 * long. Queues are disabled either because of disassociation
1231 * or low resource scenarios. In case of disassociation it is
1232 * ok to ignore this. But if associated, we have do possible
1233 * recovery here
1234 */
1235
1236 for (i = 0; i < NUM_TX_QUEUES; i++) {
1237 txq = netdev_get_tx_queue(dev, i);
Srinivas Girigowda028c4482017-03-09 18:52:02 -08001238 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
1239 "Queue: %d status: %d txq->trans_start: %lu",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001240 i, netif_tx_queue_stopped(txq), txq->trans_start);
1241 }
1242
Houston Hoffman00227112017-08-14 23:58:18 -07001243 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001244 "carrier state: %d", netif_carrier_ok(dev));
Nirav Shah89223f72016-03-01 18:10:38 +05301245 hdd_ctx = WLAN_HDD_GET_CTX(adapter);
Mohit Khannaca4173b2017-09-12 21:52:19 -07001246 wlan_hdd_display_netif_queue_history(hdd_ctx,
1247 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Leo Changfdb45c32016-10-28 11:09:23 -07001248 cdp_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301249
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001250 ++adapter->hdd_stats.tx_rx_stats.tx_timeout_cnt;
1251 ++adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301252
1253 diff_jiffies = jiffies -
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001254 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301255
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001256 if ((adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt > 1) &&
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301257 (diff_jiffies > (HDD_TX_TIMEOUT * 2))) {
1258 /*
1259 * In case when there is no traffic is running, it may
1260 * possible tx time-out may once happen and later system
1261 * recovered then continuous tx timeout count has to be
1262 * reset as it is gets modified only when traffic is running.
1263 * If over a period of time if this count reaches to threshold
1264 * then host triggers a false subsystem restart. In genuine
1265 * time out case kernel will call the tx time-out back to back
1266 * at interval of HDD_TX_TIMEOUT. Here now check if previous
1267 * TX TIME out has occurred more than twice of HDD_TX_TIMEOUT
1268 * back then host may recovered here from data stall.
1269 */
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001270 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301271 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
1272 "Reset continous tx timeout stat");
1273 }
1274
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001275 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout = jiffies;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301276
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001277 if (adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt >
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301278 HDD_TX_STALL_THRESHOLD) {
1279 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1280 "Data stall due to continuous TX timeouts");
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001281 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Poddar, Siddarth37033032017-10-11 15:47:40 +05301282 if (hdd_ctx->config->enable_data_stall_det)
1283 cdp_post_data_stall_event(soc,
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301284 DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
1285 DATA_STALL_LOG_HOST_STA_TX_TIMEOUT,
1286 0xFF, 0xFF,
1287 DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
1288 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001289}
1290
/**
 * hdd_tx_timeout() - Wrapper function to protect __hdd_tx_timeout from SSR
 * @dev: pointer to net_device structure
 *
 * Called by the kernel when a transmit queue is considered stuck
 * (ndo_tx_timeout path); simply forwards to __hdd_tx_timeout inside
 * the SSR protect/unprotect bracket.
 *
 * Return: none
 */
void hdd_tx_timeout(struct net_device *dev)
{
	cds_ssr_protect(__func__);
	__hdd_tx_timeout(dev);
	cds_ssr_unprotect(__func__);
}
1307
1308/**
1309 * @hdd_init_tx_rx() - Initialize Tx/RX module
Jeff Johnson80486862017-10-02 13:21:29 -07001310 * @adapter: pointer to adapter context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001311 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301312 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
1313 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001314 */
Jeff Johnson80486862017-10-02 13:21:29 -07001315QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001316{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301317 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001318
Jeff Johnson80486862017-10-02 13:21:29 -07001319 if (NULL == adapter) {
1320 hdd_err("adapter is NULL");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301321 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301322 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001323 }
1324
1325 return status;
1326}
1327
1328/**
1329 * @hdd_deinit_tx_rx() - Deinitialize Tx/RX module
Jeff Johnson80486862017-10-02 13:21:29 -07001330 * @adapter: pointer to adapter context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001331 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301332 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
1333 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001334 */
Jeff Johnson80486862017-10-02 13:21:29 -07001335QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001336{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301337 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001338
Jeff Johnson80486862017-10-02 13:21:29 -07001339 if (NULL == adapter) {
1340 hdd_err("adapter is NULL");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301341 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301342 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001343 }
1344
1345 return status;
1346}
1347
1348/**
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001349 * hdd_mon_rx_packet_cbk() - Receive callback registered with OL layer.
1350 * @context: [in] pointer to qdf context
1351 * @rxBuf: [in] pointer to rx qdf_nbuf
1352 *
1353 * TL will call this to notify the HDD when one or more packets were
1354 * received for a registered STA.
1355 *
1356 * Return: QDF_STATUS_E_FAILURE if any errors encountered, QDF_STATUS_SUCCESS
1357 * otherwise
1358 */
1359static QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
1360{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001361 struct hdd_adapter *adapter;
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001362 int rxstat;
1363 struct sk_buff *skb;
1364 struct sk_buff *skb_next;
1365 unsigned int cpu_index;
1366
1367 /* Sanity check on inputs */
1368 if ((NULL == context) || (NULL == rxbuf)) {
1369 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1370 "%s: Null params being passed", __func__);
1371 return QDF_STATUS_E_FAILURE;
1372 }
1373
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001374 adapter = (struct hdd_adapter *)context;
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001375 if ((NULL == adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
1376 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson36e74c42017-09-18 08:15:42 -07001377 "invalid adapter %pK", adapter);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001378 return QDF_STATUS_E_FAILURE;
1379 }
1380
1381 cpu_index = wlan_hdd_get_cpu();
1382
1383 /* walk the chain until all are processed */
1384 skb = (struct sk_buff *) rxbuf;
1385 while (NULL != skb) {
1386 skb_next = skb->next;
1387 skb->dev = adapter->dev;
1388
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001389 ++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001390 ++adapter->stats.rx_packets;
1391 adapter->stats.rx_bytes += skb->len;
1392
1393 /* Remove SKB from internal tracking table before submitting
1394 * it to stack
1395 */
1396 qdf_net_buf_debug_release_skb(skb);
1397
1398 /*
1399 * If this is not a last packet on the chain
1400 * Just put packet into backlog queue, not scheduling RX sirq
1401 */
1402 if (skb->next) {
1403 rxstat = netif_rx(skb);
1404 } else {
1405 /*
1406 * This is the last packet on the chain
1407 * Scheduling rx sirq
1408 */
1409 rxstat = netif_rx_ni(skb);
1410 }
1411
1412 if (NET_RX_SUCCESS == rxstat)
1413 ++adapter->
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001414 hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001415 else
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001416 ++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001417
1418 skb = skb_next;
1419 }
1420
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001421 return QDF_STATUS_SUCCESS;
1422}
1423
1424/**
Naveen Rawatf28315c2016-06-29 18:06:02 -07001425 * hdd_get_peer_idx() - Get the idx for given address in peer table
1426 * @sta_ctx: pointer to HDD Station Context
1427 * @addr: pointer to Peer Mac address
1428 *
1429 * Return: index when success else INVALID_PEER_IDX
1430 */
Jeff Johnson811f47d2017-10-03 11:33:09 -07001431int hdd_get_peer_idx(struct hdd_station_ctx *sta_ctx,
1432 struct qdf_mac_addr *addr)
Naveen Rawatf28315c2016-06-29 18:06:02 -07001433{
1434 uint8_t idx;
1435
Naveen Rawatc45d1622016-07-05 12:20:09 -07001436 for (idx = 0; idx < MAX_PEERS; idx++) {
Naveen Rawatac027cb2017-04-27 15:02:42 -07001437 if (sta_ctx->conn_info.staId[idx] == HDD_WLAN_INVALID_STA_ID)
Naveen Rawatf28315c2016-06-29 18:06:02 -07001438 continue;
1439 if (qdf_mem_cmp(&sta_ctx->conn_info.peerMacAddress[idx],
1440 addr, sizeof(struct qdf_mac_addr)))
1441 continue;
1442 return idx;
1443 }
1444
1445 return INVALID_PEER_IDX;
1446}
1447
Ravi Joshibb8d4512016-08-22 10:14:52 -07001448/*
1449 * hdd_is_mcast_replay() - checks if pkt is multicast replay
1450 * @skb: packet skb
1451 *
1452 * Return: true if replayed multicast pkt, false otherwise
1453 */
1454static bool hdd_is_mcast_replay(struct sk_buff *skb)
1455{
1456 struct ethhdr *eth;
1457
1458 eth = eth_hdr(skb);
1459 if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
1460 if (unlikely(ether_addr_equal(eth->h_source,
1461 skb->dev->dev_addr)))
1462 return true;
1463 }
1464 return false;
1465}
1466
Naveen Rawatf28315c2016-06-29 18:06:02 -07001467/**
Jeff Johnsondcf84ce2017-10-05 09:26:24 -07001468 * hdd_is_arp_local() - check if local or non local arp
1469 * @skb: pointer to sk_buff
1470 *
1471 * Return: true if local arp or false otherwise.
1472 */
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301473static bool hdd_is_arp_local(struct sk_buff *skb)
1474{
1475 struct arphdr *arp;
1476 struct in_ifaddr **ifap = NULL;
1477 struct in_ifaddr *ifa = NULL;
1478 struct in_device *in_dev;
1479 unsigned char *arp_ptr;
1480 __be32 tip;
1481
1482 arp = (struct arphdr *)skb->data;
1483 if (arp->ar_op == htons(ARPOP_REQUEST)) {
1484 in_dev = __in_dev_get_rtnl(skb->dev);
1485 if (in_dev) {
1486 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1487 ifap = &ifa->ifa_next) {
1488 if (!strcmp(skb->dev->name, ifa->ifa_label))
1489 break;
1490 }
1491 }
1492
1493 if (ifa && ifa->ifa_local) {
1494 arp_ptr = (unsigned char *)(arp + 1);
1495 arp_ptr += (skb->dev->addr_len + 4 +
1496 skb->dev->addr_len);
1497 memcpy(&tip, arp_ptr, 4);
Poddar, Siddarthb4b74792017-11-06 14:57:35 +05301498 hdd_debug("ARP packet: local IP: %x dest IP: %x",
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301499 ifa->ifa_local, tip);
Rajeev Kumaref3a3362017-05-07 20:11:16 -07001500 if (ifa->ifa_local == tip)
1501 return true;
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301502 }
1503 }
1504
Rajeev Kumaref3a3362017-05-07 20:11:16 -07001505 return false;
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301506}
1507
1508/**
Rajeev Kumaref3a3362017-05-07 20:11:16 -07001509 * hdd_is_rx_wake_lock_needed() - check if wake lock is needed
1510 * @skb: pointer to sk_buff
1511 *
1512 * RX wake lock is needed for:
1513 * 1) Unicast data packet OR
1514 * 2) Local ARP data packet
1515 *
1516 * Return: true if wake lock is needed or false otherwise.
1517 */
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301518static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb)
1519{
1520 if ((skb->pkt_type != PACKET_BROADCAST &&
1521 skb->pkt_type != PACKET_MULTICAST) || hdd_is_arp_local(skb))
1522 return true;
1523
1524 return false;
1525}
1526
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001527#ifdef RECEIVE_OFFLOAD
1528/**
1529 * hdd_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO
1530 * @hdd_ctx: pointer to HDD Station Context
1531 *
1532 * Return: None
1533 */
1534static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
1535{
1536 if (!(hdd_ctx->config->lro_enable ^
1537 hdd_ctx->config->gro_enable)) {
1538 hdd_ctx->config->lro_enable && hdd_ctx->config->gro_enable ?
1539 hdd_err("Can't enable both LRO and GRO, disabling Rx offload") :
1540 hdd_debug("LRO and GRO both are disabled");
1541 hdd_ctx->ol_enable = 0;
1542 } else if (hdd_ctx->config->lro_enable) {
1543 hdd_debug("Rx offload LRO is enabled");
1544 hdd_ctx->ol_enable = CFG_LRO_ENABLED;
1545 } else {
1546 hdd_debug("Rx offload GRO is enabled");
1547 hdd_ctx->ol_enable = CFG_GRO_ENABLED;
1548 }
1549}
1550
1551/**
 * hdd_gro_rx() - Handle Rx processing via GRO
1553 * @adapter: pointer to adapter context
1554 * @skb: pointer to sk_buff
1555 *
1556 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
1557 */
1558static QDF_STATUS hdd_gro_rx(struct hdd_adapter *adapter, struct sk_buff *skb)
1559{
1560 struct napi_struct *napi;
1561 struct qca_napi_data *napid;
1562 QDF_STATUS status = QDF_STATUS_E_FAILURE;
1563
1564 /* Only enabling it for STA mode like LRO today */
1565 if (QDF_STA_MODE != adapter->device_mode)
1566 return QDF_STATUS_E_NOSUPPORT;
1567
1568 napid = hdd_napi_get_all();
1569 napi = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(skb), napid);
1570 skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
1571
1572 if (GRO_DROP != napi_gro_receive(napi, skb))
1573 status = QDF_STATUS_SUCCESS;
1574
1575 return status;
1576}
1577
1578/**
1579 * hdd_register_rx_ol() - Register LRO/GRO rx processing callbacks
1580 *
1581 * Return: none
1582 */
1583static void hdd_register_rx_ol(void)
1584{
1585 struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1586
1587 if (!hdd_ctx)
1588 hdd_err("HDD context is NULL");
1589
1590 if (hdd_ctx->ol_enable == CFG_LRO_ENABLED) {
1591 /* Register the flush callback */
1592 hdd_ctx->receive_offload_cb = hdd_lro_rx;
1593 hdd_debug("LRO is enabled");
1594 } else if (hdd_ctx->ol_enable == CFG_GRO_ENABLED) {
1595 hdd_ctx->receive_offload_cb = hdd_gro_rx;
1596 hdd_debug("GRO is enabled");
1597 }
1598}
1599
1600int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
1601{
1602 struct cdp_lro_hash_config lro_config = {0};
1603
1604 hdd_resolve_rx_ol_mode(hdd_ctx);
1605
1606 hdd_register_rx_ol();
1607
1608 /*
1609 * This will enable flow steering and Toeplitz hash
1610 * So enable it for LRO or GRO processing.
1611 */
1612 if (hdd_napi_enabled(HDD_NAPI_ANY) == 0) {
1613 hdd_warn("NAPI is disabled");
1614 return 0;
1615 }
1616
1617 lro_config.lro_enable = 1;
1618 lro_config.tcp_flag = TCPHDR_ACK;
1619 lro_config.tcp_flag_mask = TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST |
1620 TCPHDR_ACK | TCPHDR_URG | TCPHDR_ECE | TCPHDR_CWR;
1621
1622 get_random_bytes(lro_config.toeplitz_hash_ipv4,
1623 (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
1624 LRO_IPV4_SEED_ARR_SZ));
1625
1626 get_random_bytes(lro_config.toeplitz_hash_ipv6,
1627 (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
1628 LRO_IPV6_SEED_ARR_SZ));
1629
1630 if (0 != wma_lro_init(&lro_config)) {
1631 hdd_err("Failed to send LRO configuration!");
1632 hdd_ctx->ol_enable = 0;
1633 return -EAGAIN;
1634 }
1635
1636 return 0;
1637}
1638
1639void hdd_disable_rx_ol_in_concurrency(bool disable)
1640{
1641 struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1642
1643 if (!hdd_ctx) {
1644 hdd_err("hdd_ctx is NULL");
1645 return;
1646 }
1647
1648 if (disable) {
1649 if (hdd_ctx->en_tcp_delack_no_lro) {
1650 struct wlan_rx_tp_data rx_tp_data;
1651
1652 hdd_info("Enable TCP delack as LRO disabled in concurrency");
1653 rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
1654 rx_tp_data.level = GET_CUR_RX_LVL(hdd_ctx);
1655 wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
1656 WLAN_SVC_WLAN_TP_IND,
1657 &rx_tp_data,
1658 sizeof(rx_tp_data));
1659 hdd_ctx->en_tcp_delack_no_lro = 1;
1660 }
1661 qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 1);
1662 } else {
1663 if (hdd_ctx->en_tcp_delack_no_lro) {
1664 hdd_info("Disable TCP delack as LRO is enabled");
1665 hdd_ctx->en_tcp_delack_no_lro = 0;
1666 hdd_reset_tcp_delack(hdd_ctx);
1667 }
1668 qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 0);
1669 }
1670}
1671
1672void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
1673{
1674 if (disable)
1675 qdf_atomic_set(&hdd_ctx->disable_lro_in_low_tput, 1);
1676 else
1677 qdf_atomic_set(&hdd_ctx->disable_lro_in_low_tput, 0);
1678}
1679
1680/**
1681 * hdd_can_handle_receive_offload() - Check for dynamic disablement
1682 * @hdd_ctx: hdd context
1683 * @skb: pointer to sk_buff which will be processed by Rx OL
1684 *
1685 * Check for dynamic disablement of Rx offload
1686 *
1687 * Return: false if we cannot process otherwise true
1688 */
1689static bool hdd_can_handle_receive_offload(struct hdd_context *hdd_ctx,
1690 struct sk_buff *skb)
1691{
1692 if (!QDF_NBUF_CB_RX_TCP_PROTO(skb) ||
1693 qdf_atomic_read(&hdd_ctx->disable_lro_in_concurrency) ||
1694 QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) ||
1695 qdf_atomic_read(&hdd_ctx->disable_lro_in_low_tput))
1696 return false;
1697 else
1698 return true;
1699}
1700#else /* RECEIVE_OFFLOAD */
/* Stub for builds without RECEIVE_OFFLOAD: offload is never available */
static bool hdd_can_handle_receive_offload(struct hdd_context *hdd_ctx,
					   struct sk_buff *skb)
{
	return false;
}

/* Stub for builds without RECEIVE_OFFLOAD: always fails with -EPERM */
int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
{
	hdd_err("Rx_OL, LRO/GRO not supported");
	return -EPERM;
}

/* No-op stub: nothing to disable when RECEIVE_OFFLOAD is compiled out */
void hdd_disable_rx_ol_in_concurrency(bool disable)
{
}

/* No-op stub: nothing to disable when RECEIVE_OFFLOAD is compiled out */
void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
{
}
1720#endif /* RECEIVE_OFFLOAD */
1721
Yu Wang66a250b2017-07-19 11:46:40 +08001722#ifdef WLAN_FEATURE_TSF_PLUS
1723static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
1724 qdf_nbuf_t netbuf,
1725 uint64_t target_time)
1726{
1727 if (!HDD_TSF_IS_RX_SET(hdd_ctx))
1728 return;
1729
1730 hdd_rx_timestamp(netbuf, target_time);
1731}
1732#else
/* No-op stub: RX TSF timestamping compiled out (WLAN_FEATURE_TSF_PLUS off) */
static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
					qdf_nbuf_t netbuf,
					uint64_t target_time)
{
}
1738#endif
1739
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301740/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001741 * hdd_rx_packet_cbk() - Receive packet handler
Dhanashri Atre182b0272016-02-17 15:35:07 -08001742 * @context: pointer to HDD context
Nirav Shahcbc6d722016-03-01 16:24:53 +05301743 * @rxBuf: pointer to rx qdf_nbuf
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001744 *
1745 * Receive callback registered with TL. TL will call this to notify
1746 * the HDD when one or more packets were received for a registered
1747 * STA.
1748 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301749 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
1750 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001751 */
QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
{
	struct hdd_adapter *adapter = NULL;
	struct hdd_context *hdd_ctx = NULL;
	int rxstat = 0;
	QDF_STATUS rx_ol_status = QDF_STATUS_E_FAILURE;
	struct sk_buff *skb = NULL;
	struct sk_buff *next = NULL;
	struct hdd_station_ctx *sta_ctx = NULL;
	unsigned int cpu_index;
	struct qdf_mac_addr *mac_addr;
	bool wake_lock = false;
	uint8_t pkt_type = 0;
	bool proto_pkt_logged = false;
	bool track_arp = false;

	/* Sanity check on inputs */
	if (unlikely((NULL == context) || (NULL == rxBuf))) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)context;
	if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Magic cookie(%x) for adapter sanity verification is invalid",
			  adapter->magic);
		return QDF_STATUS_E_FAILURE;
	}

	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	if (unlikely(NULL == hdd_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: HDD context is Null", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = wlan_hdd_get_cpu();

	/* rxBuf may be a chain of skbs; walk and deliver each one */
	next = (struct sk_buff *)rxBuf;

	while (next) {
		skb = next;
		next = skb->next;
		/* unlink before handing the skb to the stack */
		skb->next = NULL;

#ifdef QCA_WIFI_QCA6290 /* Debug code, remove later */
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
			  "%s: skb %pK skb->len %d\n", __func__, skb, skb->len);
#endif
		/* count ARP responses from the tracked IP for NUD stats */
		if (QDF_NBUF_CB_PACKET_TYPE_ARP ==
		    QDF_NBUF_CB_GET_PACKET_TYPE(skb)) {
			if (qdf_nbuf_data_is_arp_rsp(skb) &&
				(hdd_ctx->track_arp_ip ==
			     qdf_nbuf_get_arp_src_ip(skb))) {
				++adapter->hdd_stats.hdd_arp_stats.
						rx_arp_rsp_count;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						QDF_TRACE_LEVEL_INFO,
						"%s: ARP packet received",
						__func__);
				track_arp = true;
			}
		}
		/* track connectivity stats */
		if (adapter->pkt_type_bitmap)
			hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
						PKT_TYPE_RSP, &pkt_type);

		/* proxy ARP: drop gratuitous ARP / unsolicited NA frames */
		sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
		if ((sta_ctx->conn_info.proxyARPService) &&
		    cfg80211_is_gratuitous_arp_unsolicited_na(skb)) {
			uint32_t rx_dropped;

			rx_dropped = ++adapter->hdd_stats.tx_rx_stats.
							rx_dropped[cpu_index];
			/* rate limit error messages to 1/8th */
			if ((rx_dropped & 0x07) == 0)
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					QDF_TRACE_LEVEL_INFO,
					"%s: Dropping HS 2.0 Gratuitous ARP or Unsolicited NA count=%u",
					__func__, rx_dropped);
			/* Remove SKB from internal tracking table before submitting
			 * it to stack
			 */
			qdf_nbuf_free(skb);
			continue;
		}

		/* DP trace logging; the second record is skipped when the
		 * protocol logger already captured this packet
		 */
		hdd_event_eapol_log(skb, QDF_RX);
		proto_pkt_logged = qdf_dp_trace_log_pkt(adapter->session_id,
						skb, QDF_RX,
						QDF_TRACE_DEFAULT_PDEV_ID);

		DPTRACE(qdf_dp_trace(skb,
			QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			qdf_nbuf_data_addr(skb),
			sizeof(qdf_nbuf_data(skb)), QDF_RX));

		if (!proto_pkt_logged) {
			DPTRACE(qdf_dp_trace(skb,
				QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
				QDF_TRACE_DEFAULT_PDEV_ID,
				(uint8_t *)skb->data, qdf_nbuf_len(skb),
				QDF_RX));
			/* packets longer than one record are split in two */
			if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
				DPTRACE(qdf_dp_trace(skb,
					QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
					QDF_TRACE_DEFAULT_PDEV_ID,
					(uint8_t *)
					&skb->data[QDF_DP_TRACE_RECORD_SIZE],
					(qdf_nbuf_len(skb) -
					QDF_DP_TRACE_RECORD_SIZE),
					QDF_RX));
		}
		/* source MAC follows the destination MAC in the eth header */
		mac_addr = (struct qdf_mac_addr *)(skb->data+QDF_MAC_ADDR_SIZE);

		ucfg_tdls_update_rx_pkt_cnt(adapter->hdd_vdev, mac_addr);

		skb->dev = adapter->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
		++adapter->stats.rx_packets;
		adapter->stats.rx_bytes += skb->len;

		/* Incr GW Rx count for NUD tracking based on GW mac addr */
		hdd_nud_incr_gw_rx_pkt_cnt(adapter, mac_addr);

		/* Check & drop replayed mcast packets (for IPV6) */
		if (hdd_ctx->config->multicast_replay_filter &&
				hdd_is_mcast_replay(skb)) {
			++adapter->hdd_stats.tx_rx_stats.rx_dropped[cpu_index];
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
				"%s: Dropping multicast replay pkt", __func__);
			qdf_nbuf_free(skb);
			continue;
		}

		/* hold configurable wakelock for unicast traffic */
		if (hdd_ctx->config->rx_wakelock_timeout &&
		    sta_ctx->conn_info.uIsAuthenticated)
			wake_lock = hdd_is_rx_wake_lock_needed(skb);

		if (wake_lock) {
			cds_host_diag_log_work(&hdd_ctx->rx_wake_lock,
					hdd_ctx->config->rx_wakelock_timeout,
					WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
			qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock,
							hdd_ctx->config->
								rx_wakelock_timeout);
		}

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(skb);

		hdd_tsf_timestamp_rx(hdd_ctx, skb, ktime_to_us(skb->tstamp));

		/* try LRO/GRO delivery first; fall back to normal stack
		 * delivery when offload is unavailable or did not consume
		 * the packet
		 */
		if (hdd_can_handle_receive_offload(hdd_ctx, skb) &&
		    hdd_ctx->receive_offload_cb)
			rx_ol_status = hdd_ctx->receive_offload_cb(adapter,
								   skb);

		if (rx_ol_status != QDF_STATUS_SUCCESS) {
			if (hdd_napi_enabled(HDD_NAPI_ANY) &&
			    !hdd_ctx->enable_rxthread &&
			    !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
				rxstat = netif_receive_skb(skb);
			else
				rxstat = netif_rx_ni(skb);
		}

		/* rxstat == 0 covers both offload success (rxstat untouched)
		 * and stack accept (NET_RX_SUCCESS)
		 */
		if (!rxstat) {
			++adapter->hdd_stats.tx_rx_stats.
						rx_delivered[cpu_index];
			if (track_arp)
				++adapter->hdd_stats.hdd_arp_stats.
							rx_delivered;
			/* track connectivity stats */
			if (adapter->pkt_type_bitmap)
				hdd_tx_rx_collect_connectivity_stats_info(
					skb, adapter,
					PKT_TYPE_RX_DELIVERED, &pkt_type);
		} else {
			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
			if (track_arp)
				++adapter->hdd_stats.hdd_arp_stats.rx_refused;

			/* track connectivity stats */
			if (adapter->pkt_type_bitmap)
				hdd_tx_rx_collect_connectivity_stats_info(
					skb, adapter,
					PKT_TYPE_RX_REFUSED, &pkt_type);

		}
	}

	return QDF_STATUS_SUCCESS;
}
1954
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001955/**
1956 * hdd_reason_type_to_string() - return string conversion of reason type
1957 * @reason: reason type
1958 *
1959 * This utility function helps log string conversion of reason type.
1960 *
1961 * Return: string conversion of device mode, if match found;
1962 * "Unknown" otherwise.
1963 */
1964const char *hdd_reason_type_to_string(enum netif_reason_type reason)
1965{
1966 switch (reason) {
1967 CASE_RETURN_STRING(WLAN_CONTROL_PATH);
1968 CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL);
1969 CASE_RETURN_STRING(WLAN_FW_PAUSE);
1970 CASE_RETURN_STRING(WLAN_TX_ABORT);
1971 CASE_RETURN_STRING(WLAN_VDEV_STOP);
1972 CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
1973 CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
Rakesh Pillai3e534db2017-09-26 18:59:43 +05301974 CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL_PRIORITY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001975 default:
Nirav Shah617cff92016-04-25 10:24:24 +05301976 return "Invalid";
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001977 }
1978}
1979
1980/**
1981 * hdd_action_type_to_string() - return string conversion of action type
1982 * @action: action type
1983 *
1984 * This utility function helps log string conversion of action_type.
1985 *
1986 * Return: string conversion of device mode, if match found;
1987 * "Unknown" otherwise.
1988 */
1989const char *hdd_action_type_to_string(enum netif_action_type action)
1990{
1991
1992 switch (action) {
1993 CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE);
1994 CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE);
1995 CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
1996 CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
1997 CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
Rakesh Pillai3e534db2017-09-26 18:59:43 +05301998 CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE);
1999 CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE_N_CARRIER);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002000 CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
2001 CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302002 CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_ON);
2003 CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_OFF);
2004 CASE_RETURN_STRING(WLAN_WAKE_NON_PRIORITY_QUEUE);
2005 CASE_RETURN_STRING(WLAN_STOP_NON_PRIORITY_QUEUE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002006 default:
Nirav Shah617cff92016-04-25 10:24:24 +05302007 return "Invalid";
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002008 }
2009}
2010
2011/**
2012 * wlan_hdd_update_queue_oper_stats - update queue operation statistics
2013 * @adapter: adapter handle
2014 * @action: action type
2015 * @reason: reason type
2016 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002017static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002018 enum netif_action_type action, enum netif_reason_type reason)
2019{
2020 switch (action) {
2021 case WLAN_STOP_ALL_NETIF_QUEUE:
2022 case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302023 case WLAN_NETIF_PRIORITY_QUEUE_OFF:
2024 case WLAN_STOP_NON_PRIORITY_QUEUE:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002025 adapter->queue_oper_stats[reason].pause_count++;
2026 break;
2027 case WLAN_START_ALL_NETIF_QUEUE:
2028 case WLAN_WAKE_ALL_NETIF_QUEUE:
2029 case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302030 case WLAN_NETIF_PRIORITY_QUEUE_ON:
2031 case WLAN_WAKE_NON_PRIORITY_QUEUE:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002032 adapter->queue_oper_stats[reason].unpause_count++;
2033 break;
2034 default:
2035 break;
2036 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002037}
2038
2039/**
jiad5b986632017-08-04 11:59:20 +08002040 * hdd_netdev_queue_is_locked()
2041 * @txq: net device tx queue
2042 *
2043 * For SMP system, always return false and we could safely rely on
2044 * __netif_tx_trylock().
2045 *
2046 * Return: true locked; false not locked
2047 */
2048#ifdef QCA_CONFIG_SMP
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	/* SMP build: rely solely on __netif_tx_trylock() for exclusion */
	return false;
}
2053#else
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	/* UP build: xmit_lock_owner is -1 when nobody holds the tx lock */
	return txq->xmit_lock_owner != -1;
}
2058#endif
2059
2060/**
Nirav Shah89223f72016-03-01 18:10:38 +05302061 * wlan_hdd_update_txq_timestamp() - update txq timestamp
2062 * @dev: net device
2063 *
2064 * Return: none
2065 */
Jeff Johnson3ae708d2016-10-05 15:45:00 -07002066static void wlan_hdd_update_txq_timestamp(struct net_device *dev)
Nirav Shah89223f72016-03-01 18:10:38 +05302067{
2068 struct netdev_queue *txq;
2069 int i;
Nirav Shah89223f72016-03-01 18:10:38 +05302070
2071 for (i = 0; i < NUM_TX_QUEUES; i++) {
2072 txq = netdev_get_tx_queue(dev, i);
jiad5b986632017-08-04 11:59:20 +08002073
2074 /*
2075 * On UP system, kernel will trigger watchdog bite if spinlock
2076 * recursion is detected. Unfortunately recursion is possible
2077 * when it is called in dev_queue_xmit() context, where stack
2078 * grabs the lock before calling driver's ndo_start_xmit
2079 * callback.
2080 */
2081 if (!hdd_netdev_queue_is_locked(txq)) {
2082 if (__netif_tx_trylock(txq)) {
2083 txq_trans_update(txq);
2084 __netif_tx_unlock(txq);
2085 }
wadesongba6373e2017-05-15 20:59:05 +08002086 }
Nirav Shah89223f72016-03-01 18:10:38 +05302087 }
2088}
2089
2090/**
Nirav Shah617cff92016-04-25 10:24:24 +05302091 * wlan_hdd_update_unpause_time() - update unpause time
2092 * @adapter: adapter handle
2093 *
2094 * Return: none
2095 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002096static void wlan_hdd_update_unpause_time(struct hdd_adapter *adapter)
Nirav Shah617cff92016-04-25 10:24:24 +05302097{
2098 qdf_time_t curr_time = qdf_system_ticks();
2099
2100 adapter->total_unpause_time += curr_time - adapter->last_time;
2101 adapter->last_time = curr_time;
2102}
2103
2104/**
2105 * wlan_hdd_update_pause_time() - update pause time
2106 * @adapter: adapter handle
2107 *
2108 * Return: none
2109 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002110static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
Nirav Shahda008342016-05-17 18:50:40 +05302111 uint32_t temp_map)
Nirav Shah617cff92016-04-25 10:24:24 +05302112{
2113 qdf_time_t curr_time = qdf_system_ticks();
Nirav Shahda008342016-05-17 18:50:40 +05302114 uint8_t i;
2115 qdf_time_t pause_time;
Nirav Shah617cff92016-04-25 10:24:24 +05302116
Nirav Shahda008342016-05-17 18:50:40 +05302117 pause_time = curr_time - adapter->last_time;
2118 adapter->total_pause_time += pause_time;
Nirav Shah617cff92016-04-25 10:24:24 +05302119 adapter->last_time = curr_time;
Nirav Shahda008342016-05-17 18:50:40 +05302120
2121 for (i = 0; i < WLAN_REASON_TYPE_MAX; i++) {
2122 if (temp_map & (1 << i)) {
2123 adapter->queue_oper_stats[i].total_pause_time +=
2124 pause_time;
2125 break;
2126 }
2127 }
2128
Nirav Shah617cff92016-04-25 10:24:24 +05302129}
2130
2131/**
 * wlan_hdd_stop_non_priority_queue() - stop non priority queues
2133 * @adapter: adapter handle
2134 *
2135 * Return: None
2136 */
2137static inline void wlan_hdd_stop_non_priority_queue(struct hdd_adapter *adapter)
2138{
2139 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
2140 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
2141 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
2142 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
2143}
2144
2145/**
 * wlan_hdd_wake_non_priority_queue() - wake non priority queues
2147 * @adapter: adapter handle
2148 *
2149 * Return: None
2150 */
2151static inline void wlan_hdd_wake_non_priority_queue(struct hdd_adapter *adapter)
2152{
2153 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
2154 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
2155 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BE);
2156 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BK);
2157}
2158
/**
 * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * This is single function which is used for netif_queue related
 * actions like start/stop of network queues and on/off carrier
 * option.
 *
 * Each pause reason is tracked as a bit in adapter->pause_map; queues
 * are physically stopped only on the first pause reason and restarted
 * only when the last reason is cleared.  All pause_map accesses are
 * serialized by adapter->pause_map_lock.  Every call is also recorded
 * in queue_oper_stats and the circular queue_oper_history ring.
 *
 * Return: None
 */
void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	uint32_t temp_map;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) ||
		 (!adapter->dev)) {
		hdd_err("adapter is invalid");
		return;
	}

	switch (action) {

	case WLAN_NETIF_CARRIER_ON:
		netif_carrier_on(adapter->dev);
		break;

	case WLAN_NETIF_CARRIER_OFF:
		netif_carrier_off(adapter->dev);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		/* Stop queues only on the first pause reason; later reasons
		 * just accumulate in pause_map.
		 */
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			wlan_hdd_stop_non_priority_queue(adapter);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		/* snapshot taken before clearing so pause-time accounting
		 * can attribute the elapsed pause to the old reason set
		 */
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		/* NOTE(review): wakes the priority subqueue even if other
		 * reasons remain set in pause_map — confirm this is intended.
		 */
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		/* Restart only when the last pause reason is cleared */
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_wake_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			wlan_hdd_wake_non_priority_queue(adapter);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		/* carrier change is done under the same lock as the queues */
		netif_carrier_off(adapter->dev);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_carrier_on(adapter->dev);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	default:
		hdd_err("unsupported action %d", action);
	}

	/* Re-apply the unauthorised-peer pause if that reason is still set */
	spin_lock_bh(&adapter->pause_map_lock);
	if (adapter->pause_map & (1 << WLAN_PEER_UNAUTHORISED))
		wlan_hdd_process_peer_unauthorised_pause(adapter);
	spin_unlock_bh(&adapter->pause_map_lock);

	wlan_hdd_update_queue_oper_stats(adapter, action, reason);

	/* Record this operation in the circular history ring for debugging */
	adapter->queue_oper_history[adapter->history_index].time =
		qdf_system_ticks();
	adapter->queue_oper_history[adapter->history_index].netif_action =
		action;
	adapter->queue_oper_history[adapter->history_index].netif_reason =
		reason;
	adapter->queue_oper_history[adapter->history_index].pause_map =
		adapter->pause_map;
	if (++adapter->history_index == WLAN_HDD_MAX_HISTORY_ENTRY)
		adapter->history_index = 0;
}
2311
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07002312/**
2313 * hdd_set_mon_rx_cb() - Set Monitor mode Rx callback
2314 * @dev: Pointer to net_device structure
2315 *
2316 * Return: 0 for success; non-zero for failure
2317 */
2318int hdd_set_mon_rx_cb(struct net_device *dev)
2319{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002320 struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07002321 struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07002322 int ret;
2323 QDF_STATUS qdf_status;
2324 struct ol_txrx_desc_type sta_desc = {0};
2325 struct ol_txrx_ops txrx_ops;
Leo Changfdb45c32016-10-28 11:09:23 -07002326 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2327 void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07002328
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07002329 qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
2330 txrx_ops.rx.rx = hdd_mon_rx_packet_cbk;
Ravi Joshi106ffe02017-01-18 18:09:05 -08002331 hdd_monitor_set_rx_monitor_cb(&txrx_ops, hdd_rx_monitor_callback);
Leo Changfdb45c32016-10-28 11:09:23 -07002332 cdp_vdev_register(soc,
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002333 (struct cdp_vdev *)cdp_get_vdev_from_vdev_id(soc,
Jeff Johnson1b780e42017-10-31 14:11:45 -07002334 (struct cdp_pdev *)pdev, adapter->session_id),
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002335 adapter, &txrx_ops);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07002336 /* peer is created wma_vdev_attach->wma_create_peer */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002337 qdf_status = cdp_peer_register(soc,
2338 (struct cdp_pdev *)pdev, &sta_desc);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07002339 if (QDF_STATUS_SUCCESS != qdf_status) {
Leo Changfdb45c32016-10-28 11:09:23 -07002340 hdd_err("cdp_peer_register() failed to register. Status= %d [0x%08X]",
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07002341 qdf_status, qdf_status);
2342 goto exit;
2343 }
2344
2345 qdf_status = sme_create_mon_session(hdd_ctx->hHal,
Jeff Johnson1e851a12017-10-28 14:36:12 -07002346 adapter->mac_addr.bytes);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07002347 if (QDF_STATUS_SUCCESS != qdf_status) {
2348 hdd_err("sme_create_mon_session() failed to register. Status= %d [0x%08X]",
2349 qdf_status, qdf_status);
2350 }
2351exit:
2352 ret = qdf_status_to_os_return(qdf_status);
2353 return ret;
2354}
Nirav Shahbd36b062016-07-18 11:12:59 +05302355
2356/**
2357 * hdd_send_rps_ind() - send rps indication to daemon
2358 * @adapter: adapter context
2359 *
2360 * If RPS feature enabled by INI, send RPS enable indication to daemon
2361 * Indication contents is the name of interface to find correct sysfs node
2362 * Should send all available interfaces
2363 *
2364 * Return: none
2365 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002366void hdd_send_rps_ind(struct hdd_adapter *adapter)
Nirav Shahbd36b062016-07-18 11:12:59 +05302367{
2368 int i;
2369 uint8_t cpu_map_list_len = 0;
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07002370 struct hdd_context *hdd_ctxt = NULL;
Nirav Shahbd36b062016-07-18 11:12:59 +05302371 struct wlan_rps_data rps_data;
Yun Parkff6a16a2017-09-26 16:38:18 -07002372 struct cds_config_info *cds_cfg;
2373
2374 cds_cfg = cds_get_ini_config();
Nirav Shahbd36b062016-07-18 11:12:59 +05302375
2376 if (!adapter) {
2377 hdd_err("adapter is NULL");
2378 return;
2379 }
2380
Yun Parkff6a16a2017-09-26 16:38:18 -07002381 if (!cds_cfg) {
2382 hdd_err("cds_cfg is NULL");
2383 return;
2384 }
2385
Nirav Shahbd36b062016-07-18 11:12:59 +05302386 hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
2387 rps_data.num_queues = NUM_TX_QUEUES;
2388
2389 hdd_info("cpu_map_list '%s'", hdd_ctxt->config->cpu_map_list);
2390
2391 /* in case no cpu map list is provided, simply return */
2392 if (!strlen(hdd_ctxt->config->cpu_map_list)) {
2393 hdd_err("no cpu map list found");
2394 goto err;
2395 }
2396
2397 if (QDF_STATUS_SUCCESS !=
2398 hdd_hex_string_to_u16_array(hdd_ctxt->config->cpu_map_list,
2399 rps_data.cpu_map_list,
2400 &cpu_map_list_len,
2401 WLAN_SVC_IFACE_NUM_QUEUES)) {
2402 hdd_err("invalid cpu map list");
2403 goto err;
2404 }
2405
2406 rps_data.num_queues =
2407 (cpu_map_list_len < rps_data.num_queues) ?
2408 cpu_map_list_len : rps_data.num_queues;
2409
2410 for (i = 0; i < rps_data.num_queues; i++) {
2411 hdd_info("cpu_map_list[%d] = 0x%x",
2412 i, rps_data.cpu_map_list[i]);
2413 }
2414
2415 strlcpy(rps_data.ifname, adapter->dev->name,
2416 sizeof(rps_data.ifname));
Kondabattini, Ganesh96ac37b2016-09-02 23:12:15 +05302417 wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
2418 WLAN_SVC_RPS_ENABLE_IND,
Nirav Shahbd36b062016-07-18 11:12:59 +05302419 &rps_data, sizeof(rps_data));
2420
Yun Parkff6a16a2017-09-26 16:38:18 -07002421 cds_cfg->rps_enabled = true;
2422
2423 return;
2424
Nirav Shahbd36b062016-07-18 11:12:59 +05302425err:
2426 hdd_err("Wrong RPS configuration. enabling rx_thread");
Yun Parkff6a16a2017-09-26 16:38:18 -07002427 cds_cfg->rps_enabled = false;
2428}
2429
2430/**
2431 * hdd_send_rps_disable_ind() - send rps disable indication to daemon
2432 * @adapter: adapter context
2433 *
2434 * Return: none
2435 */
2436void hdd_send_rps_disable_ind(struct hdd_adapter *adapter)
2437{
2438 uint8_t cpu_map_list_len = 0;
2439 struct hdd_context *hdd_ctxt = NULL;
2440 struct wlan_rps_data rps_data;
2441 struct cds_config_info *cds_cfg;
2442
2443 cds_cfg = cds_get_ini_config();
2444
2445 if (!adapter) {
2446 hdd_err("adapter is NULL");
2447 return;
2448 }
2449
2450 if (!cds_cfg) {
2451 hdd_err("cds_cfg is NULL");
2452 return;
2453 }
2454
2455 hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
2456 rps_data.num_queues = NUM_TX_QUEUES;
2457
2458 hdd_info("Set cpu_map_list 0");
2459
2460 qdf_mem_zero(&rps_data.cpu_map_list, sizeof(rps_data.cpu_map_list));
2461 cpu_map_list_len = 0;
2462 rps_data.num_queues =
2463 (cpu_map_list_len < rps_data.num_queues) ?
2464 cpu_map_list_len : rps_data.num_queues;
2465
2466 strlcpy(rps_data.ifname, adapter->dev->name, sizeof(rps_data.ifname));
2467 wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
2468 WLAN_SVC_RPS_ENABLE_IND,
2469 &rps_data, sizeof(rps_data));
2470
2471 cds_cfg->rps_enabled = false;
Nirav Shahbd36b062016-07-18 11:12:59 +05302472}
2473
Varun Reddy Yeturu076eaa82018-01-16 12:16:14 -08002474void hdd_tx_queue_cb(void *context, uint32_t vdev_id,
2475 enum netif_action_type action,
2476 enum netif_reason_type reason)
2477{
2478 struct hdd_context *hdd_ctx = (struct hdd_context *)context;
2479 struct hdd_adapter *adapter = NULL;
2480
2481 /*
2482 * Validating the context is not required here.
2483 * if there is a driver unload/SSR in progress happening in a
2484 * different context and it has been scheduled to run and
2485 * driver got a firmware event of sta kick out, then it is
2486 * good to disable the Tx Queue to stop the influx of traffic.
2487 */
2488 if (hdd_ctx == NULL) {
2489 hdd_err("Invalid context passed");
2490 return;
2491 }
2492
2493 adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
2494 if (adapter == NULL) {
2495 hdd_err("vdev_id %d does not exist with host", vdev_id);
2496 return;
2497 }
2498 hdd_debug("Tx Queue action %d on vdev %d", action, vdev_id);
2499
2500 wlan_hdd_netif_queue_control(adapter, action, reason);
2501}
2502
Ravi Joshib89e7f72016-09-07 13:43:15 -07002503#ifdef MSM_PLATFORM
2504/**
2505 * hdd_reset_tcp_delack() - Reset tcp delack value to default
2506 * @hdd_ctx: Handle to hdd context
2507 *
2508 * Function used to reset TCP delack value to its default value
2509 *
2510 * Return: None
2511 */
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07002512void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx)
Ravi Joshib89e7f72016-09-07 13:43:15 -07002513{
Tushnim Bhattacharyyadfbce702018-03-27 12:46:48 -07002514 enum wlan_tp_level next_level = WLAN_SVC_TP_LOW;
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07002515 struct wlan_rx_tp_data rx_tp_data = {0};
Nirav Shahbd36b062016-07-18 11:12:59 +05302516
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07002517 rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07002518 rx_tp_data.level = next_level;
Ravi Joshib89e7f72016-09-07 13:43:15 -07002519 hdd_ctx->rx_high_ind_cnt = 0;
2520 wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index, WLAN_SVC_WLAN_TP_IND,
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07002521 &rx_tp_data, sizeof(rx_tp_data));
Ravi Joshib89e7f72016-09-07 13:43:15 -07002522}
2523#endif /* MSM_PLATFORM */