/*
 * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wlan_hdd_tx_rx.c
 *
 * Linux HDD Tx/RX APIs
 */

/* denote that this file does not allow legacy hddLog */
#define HDD_DISALLOW_LEGACY_HDDLOG 1
#include "osif_sync.h"
#include <wlan_hdd_tx_rx.h>
#include <wlan_hdd_softap_tx_rx.h>
#include <wlan_hdd_napi.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/inetdevice.h>
#include <cds_sched.h>
#include <cds_utils.h>

#include <wlan_hdd_p2p.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include "sap_api.h"
#include "wlan_hdd_wmm.h"
#include "wlan_hdd_tdls.h"
#include "wlan_hdd_ocb.h"
#include "wlan_hdd_lro.h"
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_flow_ctrl_v2.h>
#include <cdp_txrx_mon.h>
#include "wlan_hdd_nan_datapath.h"
#include "pld_common.h"
#include <cdp_txrx_misc.h>
#include "wlan_hdd_rx_monitor.h"
#include "wlan_hdd_power.h"
#include "wlan_hdd_cfg80211.h"
#include <wlan_hdd_tsf.h>
#include <net/tcp.h>
#include "wma_api.h"

#include "wlan_hdd_nud_tracking.h"
#include "dp_txrx.h"
#if defined(WLAN_SUPPORT_RX_FISA)
#include "dp_fisa_rx.h"
#endif
#include <ol_defines.h>
#include "cfg_ucfg_api.h"
#include "target_type.h"
#include "wlan_hdd_object_manager.h"
#include "nan_public_structs.h"
#include "nan_ucfg_api.h"
#include <wlan_hdd_sar_limits.h>
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/*
 * Mapping of the Linux AC interpretation to SME AC.
 * The host has 5 tx queues: 4 flow-controlled queues for regular traffic
 * and one non-flow-controlled queue for high priority control traffic
 * (EAPOL, DHCP). The fifth queue is mapped to AC_VO to allow for proper
 * prioritization.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,
	SME_AC_VI,
	SME_AC_BE,
	SME_AC_BK,
	SME_AC_VO,
};

#else
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,
	SME_AC_VI,
	SME_AC_BE,
	SME_AC_BK,
};

#endif
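
/*
 * Illustrative note (not driver logic): skb->queue_mapping, assigned by
 * the select_queue callback, indexes hdd_qdisc_ac_to_tl_ac, e.g. a frame
 * on Linux tx queue 1 is classified as SME_AC_VI; with the 5-queue flow
 * control layout, the high priority control queue 4 maps to SME_AC_VO.
 */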

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
/**
 * hdd_register_hl_netdev_fc_timer() - Register HL Flow Control Timer
 * @adapter: adapter handle
 * @timer_callback: timer expiry callback
 *
 * Return: none
 */
void hdd_register_hl_netdev_fc_timer(struct hdd_adapter *adapter,
				     qdf_mc_timer_callback_t timer_callback)
{
	if (!adapter->tx_flow_timer_initialized) {
		qdf_mc_timer_init(&adapter->tx_flow_control_timer,
				  QDF_TIMER_TYPE_SW, timer_callback, adapter);
		adapter->tx_flow_timer_initialized = true;
	}
}

/**
 * hdd_deregister_hl_netdev_fc_timer() - Deregister HL Flow Control Timer
 * @adapter: adapter handle
 *
 * Return: none
 */
void hdd_deregister_hl_netdev_fc_timer(struct hdd_adapter *adapter)
{
	if (adapter->tx_flow_timer_initialized) {
		qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
		qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
		adapter->tx_flow_timer_initialized = false;
	}
}
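
/*
 * Illustrative usage (sketch, not a call site in this file): the HL
 * data path would typically arm the timer during adapter init with
 *
 *	hdd_register_hl_netdev_fc_timer(adapter,
 *					hdd_tx_resume_timer_expired_handler);
 *
 * and tear it down with hdd_deregister_hl_netdev_fc_timer(adapter).
 */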

/**
 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
 * @adapter_context: pointer to vdev adapter
 *
 * Return: None
 */
void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	struct hdd_context *hdd_ctx;
	u32 p_qpaused;
	u32 np_qpaused;

	if (!adapter) {
		hdd_err("invalid adapter context");
		return;
	}

	/* only dereference the adapter after the NULL check above */
	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	cdp_display_stats(soc, CDP_DUMP_TX_FLOW_POOL_INFO,
			  QDF_STATS_VERBOSITY_LEVEL_LOW);
	wlan_hdd_display_netif_queue_history(hdd_ctx,
					     QDF_STATS_VERBOSITY_LEVEL_LOW);
	hdd_debug("Enabling queues");
	spin_lock_bh(&adapter->pause_map_lock);
	p_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL_PRIORITY);
	np_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL);
	spin_unlock_bh(&adapter->pause_map_lock);

	if (p_qpaused) {
		wlan_hdd_netif_queue_control(adapter,
					     WLAN_NETIF_PRIORITY_QUEUE_ON,
					     WLAN_DATA_FLOW_CONTROL_PRIORITY);
		cdp_hl_fc_set_os_queue_status(soc,
					      adapter->vdev_id,
					      WLAN_NETIF_PRIORITY_QUEUE_ON);
	}
	if (np_qpaused) {
		wlan_hdd_netif_queue_control(adapter,
					     WLAN_WAKE_NON_PRIORITY_QUEUE,
					     WLAN_DATA_FLOW_CONTROL);
		cdp_hl_fc_set_os_queue_status(soc,
					      adapter->vdev_id,
					      WLAN_WAKE_NON_PRIORITY_QUEUE);
	}
}

#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
 * @adapter_context: pointer to vdev adapter
 *
 * If the blocked OS queues are not resumed within the timeout period,
 * resume them forcefully to prevent a permanent stall.
 *
 * Return: None
 */
void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;

	if (!adapter) {
		/* INVALID ARG */
		return;
	}

	hdd_debug("Enabling queues");
	wlan_hdd_netif_queue_control(adapter, WLAN_WAKE_ALL_NETIF_QUEUE,
				     WLAN_CONTROL_PATH);
}

/**
 * hdd_tx_resume_false() - Handle a TX resume callback with tx_resume false
 * @adapter: pointer to hdd adapter
 * @tx_resume: TX Q resume trigger
 *
 * A false resume trigger means the low resource condition still holds,
 * so keep the OS queues disabled and re-arm the flow control timer.
 *
 * Return: None
 */
static void
hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
{
	if (true == tx_resume)
		return;

	/* Pause TX */
	hdd_debug("Disabling queues");
	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
				     WLAN_DATA_FLOW_CONTROL);

	if (QDF_TIMER_STATE_STOPPED ==
	    qdf_mc_timer_get_current_state(&adapter->tx_flow_control_timer)) {
		QDF_STATUS status;

		status = qdf_mc_timer_start(&adapter->tx_flow_control_timer,
				WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

		if (!QDF_IS_STATUS_SUCCESS(status))
			hdd_err("Failed to start tx_flow_control_timer");
		else
			adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
	}

	adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
	adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
}

static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
					     struct sk_buff *skb)
{
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	int need_orphan = 0;

	if (adapter->tx_flow_low_watermark > 0) {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
		/*
		 * The TCP TX throttling logic changed slightly after the
		 * 3.19-rc1 kernel: the TCP send limit became smaller, which
		 * throttles the packets the TCP stack hands to the host
		 * driver and heavily degrades TCP uplink throughput. To fix
		 * this, orphan the socket buffer as soon as possible, which
		 * runs the skb's destructor and notifies the TCP stack that
		 * the buffer is unowned, so the stack pumps more packets to
		 * the host driver.
		 *
		 * TX packets might be dropped in the UDP case (e.g. during
		 * iperf testing), so orphaning needs to be protected by
		 * flow control.
		 */
		need_orphan = 1;
#else
		if (hdd_ctx->config->tx_orphan_enable)
			need_orphan = 1;
#endif
	} else if (hdd_ctx->config->tx_orphan_enable) {
		if (qdf_nbuf_is_ipv4_tcp_pkt(skb) ||
		    qdf_nbuf_is_ipv6_tcp_pkt(skb))
			need_orphan = 1;
	}

	if (need_orphan) {
		skb_orphan(skb);
		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
	}

	return skb;
}

/**
 * hdd_tx_resume_cb() - Resume OS TX Q.
 * @adapter_context: pointer to vdev adapter
 * @tx_resume: TX Q resume trigger
 *
 * Q was stopped due to WLAN TX path low resource condition
 *
 * Return: None
 */
void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;
	struct hdd_station_ctx *hdd_sta_ctx = NULL;

	if (!adapter) {
		/* INVALID ARG */
		return;
	}

	hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);

	/* Resume TX */
	if (true == tx_resume) {
		if (QDF_TIMER_STATE_STOPPED !=
		    qdf_mc_timer_get_current_state(&adapter->
						   tx_flow_control_timer)) {
			qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
		}
		hdd_debug("Enabling queues");
		wlan_hdd_netif_queue_control(adapter,
					     WLAN_WAKE_ALL_NETIF_QUEUE,
					     WLAN_DATA_FLOW_CONTROL);
		adapter->hdd_stats.tx_rx_stats.is_txflow_paused = false;
		adapter->hdd_stats.tx_rx_stats.txflow_unpause_cnt++;
	}
	hdd_tx_resume_false(adapter, tx_resume);
}

bool hdd_tx_flow_control_is_pause(void *adapter_context)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		/* INVALID ARG */
		hdd_err("invalid adapter %pK", adapter);
		return false;
	}

	return adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL);
}

/**
 * hdd_register_tx_flow_control() - Register TX flow control callbacks
 * @adapter: adapter handle
 * @timer_callback: TX flow control timer callback
 * @flow_control_fp: TX flow control (pause/resume) callback
 * @flow_control_is_pause_fp: callback to query the pause state
 *
 * Return: none
 */
void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
		qdf_mc_timer_callback_t timer_callback,
		ol_txrx_tx_flow_control_fp flow_control_fp,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause_fp)
{
	if (adapter->tx_flow_timer_initialized == false) {
		qdf_mc_timer_init(&adapter->tx_flow_control_timer,
				  QDF_TIMER_TYPE_SW,
				  timer_callback,
				  adapter);
		adapter->tx_flow_timer_initialized = true;
	}
	cdp_fc_register(cds_get_context(QDF_MODULE_ID_SOC),
			adapter->vdev_id, flow_control_fp, adapter,
			flow_control_is_pause_fp);
}
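
/*
 * Illustrative usage (sketch, not a call site in this file): a station
 * adapter would typically wire the callbacks defined above as
 *
 *	hdd_register_tx_flow_control(adapter,
 *				     hdd_tx_resume_timer_expired_handler,
 *				     hdd_tx_resume_cb,
 *				     hdd_tx_flow_control_is_pause);
 *
 * so the DP layer can pause/resume the netdev queues on resource events.
 */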

/**
 * hdd_deregister_tx_flow_control() - Deregister TX Flow control
 * @adapter: adapter handle
 *
 * Return: none
 */
void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
{
	cdp_fc_deregister(cds_get_context(QDF_MODULE_ID_SOC),
			  adapter->vdev_id);
	if (adapter->tx_flow_timer_initialized == true) {
		qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
		qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
		adapter->tx_flow_timer_initialized = false;
	}
}

/**
 * hdd_get_tx_resource() - check tx resources and pause queues if low
 * @adapter: adapter handle
 * @mac_addr: peer mac address
 * @timer_value: timeout for the flow control timer
 *
 * Return: None
 */
void hdd_get_tx_resource(struct hdd_adapter *adapter,
			 struct qdf_mac_addr *mac_addr, uint16_t timer_value)
{
	if (false ==
	    cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC),
				   OL_TXRX_PDEV_ID,
				   *mac_addr,
				   adapter->tx_flow_low_watermark,
				   adapter->tx_flow_hi_watermark_offset)) {
		hdd_debug("Disabling queues lwm %d hwm offset %d",
			  adapter->tx_flow_low_watermark,
			  adapter->tx_flow_hi_watermark_offset);
		wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
					     WLAN_DATA_FLOW_CONTROL);
		if ((adapter->tx_flow_timer_initialized == true) &&
		    (QDF_TIMER_STATE_STOPPED ==
		     qdf_mc_timer_get_current_state(&adapter->
						    tx_flow_control_timer))) {
			qdf_mc_timer_start(&adapter->tx_flow_control_timer,
					   timer_value);
			adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
			adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
			adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
		}
	}
}

#else
/**
 * hdd_skb_orphan() - skb_unshare a cloned packet, else skb_orphan
 * @adapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Return: pointer to skb structure
 */
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
					     struct sk_buff *skb)
{
	struct sk_buff *nskb;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
#endif

	hdd_skb_fill_gso_size(adapter->dev, skb);

	nskb = skb_unshare(skb, GFP_ATOMIC);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
	if (unlikely(hdd_ctx->config->tx_orphan_enable) && (nskb == skb)) {
		/*
		 * For UDP packets we want to orphan the packet to allow the
		 * app to send more packets. The flow would ultimately be
		 * controlled by the limited number of tx descriptors for
		 * the vdev.
		 */
		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
		skb_orphan(skb);
	}
#endif
	return nskb;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

uint32_t hdd_txrx_get_tx_ack_count(struct hdd_adapter *adapter)
{
	return cdp_get_tx_ack_stats(cds_get_context(QDF_MODULE_ID_SOC),
				    adapter->vdev_id);
}

#ifdef FEATURE_WLAN_DIAG_SUPPORT
/**
 * hdd_event_eapol_log() - send EAPOL event to wlan diag
 * @skb: skb ptr
 * @dir: direction
 *
 * Return: None
 */
void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
{
	int16_t eapol_key_info;

	WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);

	if ((dir == QDF_TX &&
	     (QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
	      QDF_NBUF_CB_GET_PACKET_TYPE(skb))))
		return;
	else if (!qdf_nbuf_is_ipv4_eapol_pkt(skb))
		return;

	eapol_key_info = (uint16_t)(*(uint16_t *)
				    (skb->data + EAPOL_KEY_INFO_OFFSET));

	wlan_diag_event.event_sub_type =
		(dir == QDF_TX ?
		 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
		 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
	wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
				(skb->data + EAPOL_PACKET_TYPE_OFFSET));
	wlan_diag_event.eapol_key_info = eapol_key_info;
	wlan_diag_event.eapol_rate = 0;
	qdf_mem_copy(wlan_diag_event.dest_addr,
		     (skb->data + QDF_NBUF_DEST_MAC_OFFSET),
		     sizeof(wlan_diag_event.dest_addr));
	qdf_mem_copy(wlan_diag_event.src_addr,
		     (skb->data + QDF_NBUF_SRC_MAC_OFFSET),
		     sizeof(wlan_diag_event.src_addr));

	WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
}
#endif /* FEATURE_WLAN_DIAG_SUPPORT */

/**
 * hdd_set_udp_qos_upgrade_config() - set the threshold for UDP packet
 *				      QoS upgrade
 * @adapter: adapter for which this configuration is to be applied
 * @priority: the threshold priority
 *
 * Return: 0 on success, -EINVAL on failure
 */
int hdd_set_udp_qos_upgrade_config(struct hdd_adapter *adapter,
				   uint8_t priority)
{
	if (adapter->device_mode != QDF_STA_MODE) {
		hdd_info_rl("Data priority upgrade only allowed in STA mode:%d",
			    adapter->device_mode);
		return -EINVAL;
	}

	if (priority >= QCA_WLAN_AC_ALL) {
		hdd_err_rl("Invalid data priority: %d", priority);
		return -EINVAL;
	}

	adapter->upgrade_udp_qos_threshold = priority;

	return 0;
}
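
/*
 * Example (illustrative): hdd_set_udp_qos_upgrade_config(adapter,
 * QCA_WLAN_AC_VI) stores video priority as the UDP QoS upgrade threshold
 * on a STA interface; QCA_WLAN_AC_ALL or higher is rejected with -EINVAL.
 */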

/**
 * wlan_hdd_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Return: none
 */
void wlan_hdd_classify_pkt(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	qdf_mem_zero(skb->cb, sizeof(skb->cb));

	/* check whether the destination mac address is broadcast/multicast */
	if (is_broadcast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
	else if (is_multicast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_MCAST(skb) = true;

	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ARP;
	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_DHCP;
	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_WAPI;
	else if (qdf_nbuf_is_icmp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMP;
	else if (qdf_nbuf_is_icmpv6_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMPv6;
}
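
/*
 * Illustrative note: later TX/RX stages read the classification cached
 * in skb->cb rather than re-parsing headers, e.g. (hypothetical consumer)
 *
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) == QDF_NBUF_CB_PACKET_TYPE_ARP)
 *		track_arp(skb);
 */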

/**
 * hdd_clear_tx_rx_connectivity_stats() - clear connectivity stats
 * @adapter: pointer to HDD adapter
 *
 * Return: None
 */
static void hdd_clear_tx_rx_connectivity_stats(struct hdd_adapter *adapter)
{
	hdd_debug("Clear txrx connectivity stats");
	qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
		     sizeof(adapter->hdd_stats.hdd_arp_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_dns_stats,
		     sizeof(adapter->hdd_stats.hdd_dns_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats,
		     sizeof(adapter->hdd_stats.hdd_tcp_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_icmpv4_stats,
		     sizeof(adapter->hdd_stats.hdd_icmpv4_stats));
	adapter->pkt_type_bitmap = 0;
	adapter->track_arp_ip = 0;
	qdf_mem_zero(adapter->dns_payload, adapter->track_dns_domain_len);
	adapter->track_dns_domain_len = 0;
	adapter->track_src_port = 0;
	adapter->track_dest_port = 0;
	adapter->track_dest_ipv4 = 0;
}

void hdd_reset_all_adapters_connectivity_stats(struct hdd_context *hdd_ctx)
{
	struct hdd_adapter *adapter = NULL, *next = NULL;
	QDF_STATUS status;

	hdd_enter();

	status = hdd_get_front_adapter(hdd_ctx, &adapter);

	while (adapter && QDF_STATUS_SUCCESS == status) {
		hdd_clear_tx_rx_connectivity_stats(adapter);
		status = hdd_get_next_adapter(hdd_ctx, adapter, &next);
		adapter = next;
	}

	hdd_exit();
}

/**
 * hdd_is_tx_allowed() - check if Tx is allowed based on current peer state
 * @skb: pointer to OS packet (sk_buff)
 * @vdev_id: virtual interface id
 * @peer_mac: Peer mac address
 *
 * This function gets the peer state from DP and checks if it is either
 * OL_TXRX_PEER_STATE_CONN or OL_TXRX_PEER_STATE_AUTH. Only EAP packets
 * are allowed when peer_state is OL_TXRX_PEER_STATE_CONN. All packets
 * are allowed when peer_state is OL_TXRX_PEER_STATE_AUTH.
 *
 * Return: true if Tx is allowed and false otherwise.
 */
static inline bool hdd_is_tx_allowed(struct sk_buff *skb, uint8_t vdev_id,
				     uint8_t *peer_mac)
{
	enum ol_txrx_peer_state peer_state;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	QDF_BUG(soc);

	peer_state = cdp_peer_state_get(soc, vdev_id, peer_mac);
	if (likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
		return true;
	if (OL_TXRX_PEER_STATE_CONN == peer_state &&
	    (ntohs(skb->protocol) == HDD_ETHERTYPE_802_1_X ||
	     IS_HDD_ETHERTYPE_WAI(skb)))
		return true;
	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("Invalid peer state for Tx: %d"), peer_state);
	return false;
}
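
/*
 * Illustrative note: in the CONN (pre-authentication) state only IEEE
 * 802.1X EAPOL frames (ethertype 0x888E) and WAPI frames pass this
 * check; all other traffic is held back until the peer reaches AUTH.
 */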

/**
 * hdd_tx_rx_is_dns_domain_name_match() - check whether the DNS domain name
 * in the received skb matches the tracked DNS domain name
 * @skb: pointer to skb
 * @adapter: pointer to adapter
 *
 * Return: true if it matches, else false
 */
static bool hdd_tx_rx_is_dns_domain_name_match(struct sk_buff *skb,
					       struct hdd_adapter *adapter)
{
	uint8_t *domain_name;

	if (adapter->track_dns_domain_len == 0)
		return false;

	/* OOB check: would strncmp read past skb->len? */
	if ((adapter->track_dns_domain_len +
	     QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET) > qdf_nbuf_len(skb))
		return false;

	domain_name = qdf_nbuf_get_dns_domain_name(skb,
						adapter->track_dns_domain_len);
	if (strncmp(domain_name, adapter->dns_payload,
		    adapter->track_dns_domain_len) == 0)
		return true;
	else
		return false;
}
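
/*
 * Worked example for the bounds check above (illustrative numbers): with
 * a tracked domain name of 10 bytes and the DNS name starting at byte
 * QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET, the skb must hold at least
 * offset + 10 bytes, otherwise the strncmp() would read out of bounds.
 */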

/**
 * hdd_tx_rx_collect_connectivity_stats_info() - collect connectivity stats
 * @skb: pointer to skb data packet
 * @context: pointer to adapter context
 * @action: action done on the packet
 * @pkt_type: data packet type
 *
 * Return: None
 */
void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
					       void *context,
				enum connectivity_stats_pkt_status action,
					       uint8_t *pkt_type)
{
	uint32_t pkt_type_bitmap;
	struct hdd_adapter *adapter = NULL;

	adapter = (struct hdd_adapter *)context;
	if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Magic cookie(%x) for adapter sanity verification is invalid",
			  adapter->magic);
		return;
	}

	/* ARP tracking is done already. */
	pkt_type_bitmap = adapter->pkt_type_bitmap;
	pkt_type_bitmap &= ~CONNECTIVITY_CHECK_SET_ARP;

	if (!pkt_type_bitmap)
		return;

	switch (action) {
	case PKT_TYPE_REQ:
	case PKT_TYPE_TX_HOST_FW_SENT:
		if (qdf_nbuf_is_icmp_pkt(skb)) {
			if (qdf_nbuf_data_is_icmpv4_req(skb) &&
			    (adapter->track_dest_ipv4 ==
			     qdf_nbuf_get_icmpv4_tgt_ip(skb))) {
				*pkt_type = CONNECTIVITY_CHECK_SET_ICMPV4;
				if (action == PKT_TYPE_REQ) {
					++adapter->hdd_stats.hdd_icmpv4_stats.
							tx_icmpv4_req_count;
					QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s : ICMPv4 Req packet",
						  __func__);
				} else
					/* host receives tx completion */
					++adapter->hdd_stats.hdd_icmpv4_stats.
								tx_host_fw_sent;
			}
		} else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
			if (qdf_nbuf_data_is_tcp_syn(skb) &&
			    (adapter->track_dest_port ==
			     qdf_nbuf_data_get_tcp_dst_port(skb))) {
				*pkt_type = CONNECTIVITY_CHECK_SET_TCP_SYN;
				if (action == PKT_TYPE_REQ) {
					++adapter->hdd_stats.hdd_tcp_stats.
							tx_tcp_syn_count;
					QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s : TCP Syn packet",
						  __func__);
				} else
					/* host receives tx completion */
					++adapter->hdd_stats.hdd_tcp_stats.
						tx_tcp_syn_host_fw_sent;
			} else if ((adapter->hdd_stats.hdd_tcp_stats.
				    is_tcp_syn_ack_rcv || adapter->hdd_stats.
				    hdd_tcp_stats.is_tcp_ack_sent) &&
				   qdf_nbuf_data_is_tcp_ack(skb) &&
				   (adapter->track_dest_port ==
				    qdf_nbuf_data_get_tcp_dst_port(skb))) {
				*pkt_type = CONNECTIVITY_CHECK_SET_TCP_ACK;
				if (action == PKT_TYPE_REQ &&
				    adapter->hdd_stats.hdd_tcp_stats.
							is_tcp_syn_ack_rcv) {
					++adapter->hdd_stats.hdd_tcp_stats.
							tx_tcp_ack_count;
					adapter->hdd_stats.hdd_tcp_stats.
						is_tcp_syn_ack_rcv = false;
					adapter->hdd_stats.hdd_tcp_stats.
						is_tcp_ack_sent = true;
					QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s : TCP Ack packet",
						  __func__);
				} else if (action == PKT_TYPE_TX_HOST_FW_SENT &&
					   adapter->hdd_stats.hdd_tcp_stats.
							is_tcp_ack_sent) {
					/* host receives tx completion */
					++adapter->hdd_stats.hdd_tcp_stats.
							tx_tcp_ack_host_fw_sent;
					adapter->hdd_stats.hdd_tcp_stats.
							is_tcp_ack_sent = false;
				}
			}
		} else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
			if (qdf_nbuf_data_is_dns_query(skb) &&
			    hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
				*pkt_type = CONNECTIVITY_CHECK_SET_DNS;
				if (action == PKT_TYPE_REQ) {
					++adapter->hdd_stats.hdd_dns_stats.
							tx_dns_req_count;
					QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s : DNS query packet",
						  __func__);
				} else
					/* host receives tx completion */
					++adapter->hdd_stats.hdd_dns_stats.
								tx_host_fw_sent;
			}
		}
		break;

	case PKT_TYPE_RSP:
		if (qdf_nbuf_is_icmp_pkt(skb)) {
			if (qdf_nbuf_data_is_icmpv4_rsp(skb) &&
			    (adapter->track_dest_ipv4 ==
			     qdf_nbuf_get_icmpv4_src_ip(skb))) {
				++adapter->hdd_stats.hdd_icmpv4_stats.
							rx_icmpv4_rsp_count;
				*pkt_type = CONNECTIVITY_CHECK_SET_ICMPV4;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s : ICMPv4 Res packet", __func__);
			}
		} else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
			if (qdf_nbuf_data_is_tcp_syn_ack(skb) &&
			    (adapter->track_dest_port ==
			     qdf_nbuf_data_get_tcp_src_port(skb))) {
				++adapter->hdd_stats.hdd_tcp_stats.
							rx_tcp_syn_ack_count;
				adapter->hdd_stats.hdd_tcp_stats.
					is_tcp_syn_ack_rcv = true;
				*pkt_type =
					CONNECTIVITY_CHECK_SET_TCP_SYN_ACK;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s : TCP Syn ack packet", __func__);
			}
		} else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
			if (qdf_nbuf_data_is_dns_response(skb) &&
			    hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
				++adapter->hdd_stats.hdd_dns_stats.
							rx_dns_rsp_count;
				*pkt_type = CONNECTIVITY_CHECK_SET_DNS;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s : DNS response packet", __func__);
			}
		}
		break;

	case PKT_TYPE_TX_DROPPED:
		switch (*pkt_type) {
		case CONNECTIVITY_CHECK_SET_ICMPV4:
			++adapter->hdd_stats.hdd_icmpv4_stats.tx_dropped;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : ICMPv4 Req packet dropped", __func__);
			break;
		case CONNECTIVITY_CHECK_SET_TCP_SYN:
			++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_dropped;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : TCP syn packet dropped", __func__);
			break;
		case CONNECTIVITY_CHECK_SET_TCP_ACK:
			++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_dropped;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : TCP ack packet dropped", __func__);
			break;
		case CONNECTIVITY_CHECK_SET_DNS:
			++adapter->hdd_stats.hdd_dns_stats.tx_dropped;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : DNS query packet dropped", __func__);
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_RX_DELIVERED:
		switch (*pkt_type) {
		case CONNECTIVITY_CHECK_SET_ICMPV4:
			++adapter->hdd_stats.hdd_icmpv4_stats.rx_delivered;
			break;
		case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
			++adapter->hdd_stats.hdd_tcp_stats.rx_delivered;
			break;
		case CONNECTIVITY_CHECK_SET_DNS:
			++adapter->hdd_stats.hdd_dns_stats.rx_delivered;
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_RX_REFUSED:
		switch (*pkt_type) {
		case CONNECTIVITY_CHECK_SET_ICMPV4:
			++adapter->hdd_stats.hdd_icmpv4_stats.rx_refused;
			break;
		case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
			++adapter->hdd_stats.hdd_tcp_stats.rx_refused;
			break;
		case CONNECTIVITY_CHECK_SET_DNS:
			++adapter->hdd_stats.hdd_dns_stats.rx_refused;
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_TX_ACK_CNT:
		switch (*pkt_type) {
		case CONNECTIVITY_CHECK_SET_ICMPV4:
			++adapter->hdd_stats.hdd_icmpv4_stats.tx_ack_cnt;
			break;
		case CONNECTIVITY_CHECK_SET_TCP_SYN:
			++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_ack_cnt;
			break;
		case CONNECTIVITY_CHECK_SET_TCP_ACK:
			++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_ack_cnt;
			break;
		case CONNECTIVITY_CHECK_SET_DNS:
			++adapter->hdd_stats.hdd_dns_stats.tx_ack_cnt;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * hdd_is_xmit_allowed_on_ndi() - Verify if xmit is allowed on NDI
 * @adapter: The adapter structure
 *
 * Return: True if xmit is allowed on NDI and false otherwise
 */
static bool hdd_is_xmit_allowed_on_ndi(struct hdd_adapter *adapter)
{
	enum nan_datapath_state state;

	state = ucfg_nan_get_ndi_state(adapter->vdev);
	return (state == NAN_DATA_NDI_CREATED_STATE ||
		state == NAN_DATA_CONNECTED_STATE ||
		state == NAN_DATA_CONNECTING_STATE ||
		state == NAN_DATA_PEER_CREATE_STATE);
}

/**
 * hdd_get_transmit_mac_addr() - Get the mac address to validate the xmit
 * @adapter: The adapter structure
 * @skb: The network buffer
 * @mac_addr_tx_allowed: The mac address to be filled
 *
 * Return: None
 */
static
void hdd_get_transmit_mac_addr(struct hdd_adapter *adapter, struct sk_buff *skb,
			       struct qdf_mac_addr *mac_addr_tx_allowed)
{
	struct hdd_station_ctx *sta_ctx = &adapter->session.station;
	bool is_mc_bc_addr = false;

	if (QDF_NBUF_CB_GET_IS_BCAST(skb) || QDF_NBUF_CB_GET_IS_MCAST(skb))
		is_mc_bc_addr = true;

	if (adapter->device_mode == QDF_IBSS_MODE) {
		if (is_mc_bc_addr)
			qdf_copy_macaddr(mac_addr_tx_allowed,
					 &adapter->mac_addr);
		else
			qdf_copy_macaddr(mac_addr_tx_allowed,
					 (struct qdf_mac_addr *)skb->data);
	} else if (adapter->device_mode == QDF_NDI_MODE &&
		   hdd_is_xmit_allowed_on_ndi(adapter)) {
		if (is_mc_bc_addr)
			qdf_copy_macaddr(mac_addr_tx_allowed,
					 &adapter->mac_addr);
		else
			qdf_copy_macaddr(mac_addr_tx_allowed,
					 (struct qdf_mac_addr *)skb->data);
	} else {
		if (sta_ctx->conn_info.conn_state ==
		    eConnectionState_Associated)
			qdf_copy_macaddr(mac_addr_tx_allowed,
					 &sta_ctx->conn_info.bssid);
	}
}

/**
 * __hdd_hard_start_xmit() - Transmit a frame
 * @skb: pointer to OS packet (sk_buff)
 * @dev: pointer to network device
 *
 * Function registered with the Linux OS for transmitting
 * packets. This version of the function directly passes
 * the packet to the Transport Layer.
 * In case of any packet drop or error, log the error with
 * INFO HIGH/LOW/MEDIUM to avoid excessive logging in kmsg.
 *
 * Return: None
 */
static void __hdd_hard_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	QDF_STATUS status;
	sme_ac_enum_type ac;
	enum sme_qos_wmmuptype up;
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	bool granted;
	struct hdd_station_ctx *sta_ctx = &adapter->session.station;
	struct qdf_mac_addr mac_addr;
	struct qdf_mac_addr mac_addr_tx_allowed = QDF_MAC_ADDR_ZERO_INIT;
	uint8_t pkt_type = 0;
	bool is_arp = false;
	struct wlan_objmgr_vdev *vdev;
	struct hdd_context *hdd_ctx;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

#ifdef QCA_WIFI_FTM
	if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
		kfree_skb(skb);
		return;
	}
#endif

	++adapter->hdd_stats.tx_rx_stats.tx_called;
	adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
	qdf_mem_copy(mac_addr.bytes, skb->data, sizeof(mac_addr.bytes));

	if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
	    cds_is_load_or_unload_in_progress()) {
		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_HDD_DATA,
				   "Recovery/(Un)load in progress, dropping the packet");
		goto drop_pkt;
	}

	hdd_ctx = adapter->hdd_ctx;
	if (wlan_hdd_validate_context(hdd_ctx)) {
		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_HDD_DATA,
				   "Invalid HDD context");
		goto drop_pkt;
	}

	wlan_hdd_classify_pkt(skb);
	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) == QDF_NBUF_CB_PACKET_TYPE_ARP) {
		if (qdf_nbuf_data_is_arp_req(skb) &&
		    (adapter->track_arp_ip == qdf_nbuf_get_arp_tgt_ip(skb))) {
			is_arp = true;
			++adapter->hdd_stats.hdd_arp_stats.tx_arp_req_count;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : ARP packet", __func__);
		}
	}
	/* track connectivity stats */
	if (adapter->pkt_type_bitmap)
		hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
							  PKT_TYPE_REQ,
							  &pkt_type);

	hdd_get_transmit_mac_addr(adapter, skb, &mac_addr_tx_allowed);
	if (qdf_is_macaddr_zero(&mac_addr_tx_allowed)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  "tx not allowed, transmit operation suspended");
		goto drop_pkt;
	}

	hdd_get_tx_resource(adapter, &mac_addr,
			    WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

	/* Get TL AC corresponding to Qdisc queue index/AC. */
	ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];

	if (!qdf_nbuf_ipa_owned_get(skb)) {
		skb = hdd_skb_orphan(adapter, skb);
		if (!skb)
			goto drop_pkt_accounting;
	}

	/*
	 * Add SKB to internal tracking table before further processing
	 * in WLAN driver.
	 */
	qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__);

	/*
	 * User priority from the IP header, already extracted and set by
	 * the select_queue callback function.
	 */
	up = skb->priority;

	++adapter->hdd_stats.tx_rx_stats.tx_classified_ac[ac];
#ifdef HDD_WMM_DEBUG
	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Classified as ac %d up %d", __func__, ac, up);
#endif /* HDD_WMM_DEBUG */

	if (HDD_PSB_CHANGED == adapter->psb_changed) {
		/*
		 * Determine whether acquiring admittance for a WMM AC is
		 * required or not, based on the psb configuration done in
		 * the framework.
		 */
		hdd_wmm_acquire_access_required(adapter, ac);
	}
	/*
	 * Make sure we already have access to this access category, or
	 * that this is an EAPOL or WAPI frame during initial
	 * authentication, which can have an artificially boosted higher
	 * QoS priority.
	 */

	if (((adapter->psb_changed & (1 << ac)) &&
	     likely(adapter->hdd_wmm_status.ac_status[ac].
			is_access_allowed)) ||
	    ((sta_ctx->conn_info.is_authenticated == false) &&
	     (QDF_NBUF_CB_PACKET_TYPE_EAPOL ==
	      QDF_NBUF_CB_GET_PACKET_TYPE(skb) ||
	      QDF_NBUF_CB_PACKET_TYPE_WAPI ==
	      QDF_NBUF_CB_GET_PACKET_TYPE(skb)))) {
		granted = true;
	} else {
		status = hdd_wmm_acquire_access(adapter, ac, &granted);
		adapter->psb_changed |= (1 << ac);
	}

	if (!granted) {
		bool isDefaultAc = false;
		/*
		 * ADDTS request for this AC is sent; for now,
		 * send this packet through the next available lower
		 * Access category until ADDTS negotiation completes.
		 */
		while (!likely
			       (adapter->hdd_wmm_status.ac_status[ac].
				is_access_allowed)) {
			switch (ac) {
			case SME_AC_VO:
				ac = SME_AC_VI;
				up = SME_QOS_WMM_UP_VI;
				break;
			case SME_AC_VI:
				ac = SME_AC_BE;
				up = SME_QOS_WMM_UP_BE;
				break;
			case SME_AC_BE:
				ac = SME_AC_BK;
				up = SME_QOS_WMM_UP_BK;
				break;
			default:
				ac = SME_AC_BK;
				up = SME_QOS_WMM_UP_BK;
				isDefaultAc = true;
				break;
			}
			if (isDefaultAc)
				break;
		}
		skb->priority = up;
		skb->queue_mapping = hdd_linux_up_to_ac_map[up];
	}
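
	/*
	 * At this point (illustrative summary): ac and up hold the final,
	 * possibly downgraded, access category and user priority for this
	 * frame, and skb->queue_mapping is consistent with them.
	 */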

	adapter->stats.tx_bytes += skb->len;

	vdev = hdd_objmgr_get_vdev(adapter);
	if (vdev) {
		ucfg_tdls_update_tx_pkt_cnt(vdev, &mac_addr);
		hdd_objmgr_put_vdev(vdev);
	}

	if (qdf_nbuf_is_tso(skb)) {
		adapter->stats.tx_packets += qdf_nbuf_get_tso_num_seg(skb);
	} else {
		++adapter->stats.tx_packets;
		hdd_ctx->no_tx_offload_pkt_cnt++;
	}

	hdd_event_eapol_log(skb, QDF_TX);
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);

	qdf_dp_trace_set_track(skb, QDF_TX);

	DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(skb),
			     sizeof(qdf_nbuf_data(skb)),
			     QDF_TX));

	if (!hdd_is_tx_allowed(skb, adapter->vdev_id,
			       mac_addr_tx_allowed.bytes)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
			  QDF_TRACE_LEVEL_INFO_HIGH,
			  FL("Tx not allowed for sta: "
			     QDF_MAC_ADDR_STR), QDF_MAC_ADDR_ARRAY(
			     mac_addr_tx_allowed.bytes));
		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
		goto drop_pkt_and_release_skb;
	}

	/* check whether we need to linearize skb, like non-linear udp data */
	if (hdd_skb_nontso_linearize(skb) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
			  QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: skb %pK linearize failed. drop the pkt",
			  __func__, skb);
		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
		goto drop_pkt_and_release_skb;
	}

	/*
	 * If a transmit function is not registered, drop packet
	 */
	if (!adapter->tx_fn) {
		QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA,
			  QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: TX function not registered by the data path",
			  __func__);
		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
		goto drop_pkt_and_release_skb;
	}

	if (adapter->tx_fn(soc, adapter->vdev_id, (qdf_nbuf_t)skb)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: Failed to send packet to txrx for sta_id: "
			  QDF_MAC_ADDR_STR,
			  __func__, QDF_MAC_ADDR_ARRAY(mac_addr.bytes));
		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
		goto drop_pkt_and_release_skb;
	}

	netif_trans_update(dev);

	wlan_hdd_sar_unsolicited_timer_start(hdd_ctx);

	return;

drop_pkt_and_release_skb:
	qdf_net_buf_debug_release_skb(skb);
drop_pkt:

	/* track connectivity stats */
	if (adapter->pkt_type_bitmap)
		hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
							  PKT_TYPE_TX_DROPPED,
							  &pkt_type);
	qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
			      QDF_DP_TRACE_DROP_PACKET_RECORD, 0,
			      QDF_TX);
	kfree_skb(skb);

drop_pkt_accounting:

	++adapter->stats.tx_dropped;
	++adapter->hdd_stats.tx_rx_stats.tx_dropped;
	if (is_arp) {
		++adapter->hdd_stats.hdd_arp_stats.tx_dropped;
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s : ARP packet dropped", __func__);
	}
}

/**
 * hdd_hard_start_xmit() - Wrapper function to protect
 *	__hdd_hard_start_xmit from SSR
 * @skb: pointer to OS packet
 * @net_dev: pointer to net_device structure
 *
 * Function called by OS if any packet needs to transmit.
 *
 * Return: Always returns NETDEV_TX_OK
 */
netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct osif_vdev_sync *vdev_sync;

	if (osif_vdev_sync_op_start(net_dev, &vdev_sync)) {
		hdd_debug_rl("Operation on net_dev is not permitted");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	__hdd_hard_start_xmit(skb, net_dev);

	osif_vdev_sync_op_stop(vdev_sync);

	return NETDEV_TX_OK;
}
1224
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001225/**
1226 * __hdd_tx_timeout() - TX timeout handler
1227 * @dev: pointer to network device
1228 *
1229 * This function is registered as a netdev ndo_tx_timeout method, and
1230 * is invoked by the kernel if the driver takes too long to transmit a
1231 * frame.
1232 *
1233 * Return: None
1234 */
1235static void __hdd_tx_timeout(struct net_device *dev)
1236{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001237 struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07001238 struct hdd_context *hdd_ctx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001239 struct netdev_queue *txq;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301240 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1241 u64 diff_jiffies;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001242 int i = 0;
1243
Rakshith Suresh Patkar5e1fdee2019-04-03 12:07:07 +05301244 hdd_ctx = WLAN_HDD_GET_CTX(adapter);
1245
1246 if (hdd_ctx->hdd_wlan_suspended) {
1247 hdd_debug("Device is suspended, ignore WD timeout");
1248 return;
1249 }
1250
Dustin Browne0024fa2016-10-14 16:29:21 -07001251 TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301252 DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_HDD_TX_TIMEOUT,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001253 QDF_TRACE_DEFAULT_PDEV_ID,
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301254 NULL, 0, QDF_TX));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001255
1256 /* Getting here implies we disabled the TX queues for too
1257 * long. Queues are disabled either because of disassociation
1258 * or low resource scenarios. In case of disassociation it is
1259	 * ok to ignore this. But if associated, we have to attempt
1260	 * recovery here
1261 */
1262
1263 for (i = 0; i < NUM_TX_QUEUES; i++) {
1264 txq = netdev_get_tx_queue(dev, i);
Rakesh Pillai70f1f542019-09-10 20:26:54 +05301265 hdd_debug("Queue: %d status: %d txq->trans_start: %lu",
1266 i, netif_tx_queue_stopped(txq), txq->trans_start);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001267 }
1268
Rakesh Pillai70f1f542019-09-10 20:26:54 +05301269 hdd_debug("carrier state: %d", netif_carrier_ok(dev));
Sravan Kumar Kairam887e89e2018-11-01 09:30:38 +05301270
Alok Kumar1c492fe2020-03-17 16:05:43 +05301271 wlan_hdd_display_adapter_netif_queue_history(adapter);
1272
Leo Changfdb45c32016-10-28 11:09:23 -07001273 cdp_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301274
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001275 ++adapter->hdd_stats.tx_rx_stats.tx_timeout_cnt;
1276 ++adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301277
1278 diff_jiffies = jiffies -
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001279 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301280
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001281 if ((adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt > 1) &&
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301282 (diff_jiffies > (HDD_TX_TIMEOUT * 2))) {
1283 /*
1284		 * When no traffic is running, a TX timeout may occur once and
1285		 * the system may later recover. The continuous TX timeout
1286		 * count must then be reset, since it is only updated while
1287		 * traffic is running; otherwise it could eventually reach the
1288		 * threshold and trigger a false subsystem restart. In a
1289		 * genuine timeout case the kernel invokes this handler back to
1290		 * back at intervals of HDD_TX_TIMEOUT. So if the previous TX
1291		 * timeout occurred more than twice HDD_TX_TIMEOUT ago, assume
1292		 * the host has recovered from the data stall and reset the
1293		 * count.
1294 */
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001295 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301296 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnson9a27ffa2018-05-06 17:26:57 -07001297 "Reset continuous tx timeout stat");
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301298 }
1299
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001300 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout = jiffies;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301301
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001302 if (adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt >
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301303 HDD_TX_STALL_THRESHOLD) {
1304 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1305 "Data stall due to continuous TX timeouts");
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001306 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Rakesh Pillai6a36b0a2019-09-06 16:30:05 +05301307
jitiphil377bcc12018-10-05 19:46:08 +05301308 if (cdp_cfg_get(soc, cfg_dp_enable_data_stall))
Poddar, Siddarth37033032017-10-11 15:47:40 +05301309 cdp_post_data_stall_event(soc,
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301310 DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
1311 DATA_STALL_LOG_HOST_STA_TX_TIMEOUT,
Rakesh Pillai6a36b0a2019-09-06 16:30:05 +05301312 OL_TXRX_PDEV_ID, 0xFF,
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301313 DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
1314 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001315}
1316
1317/**
1318 * hdd_tx_timeout() - Wrapper function to protect __hdd_tx_timeout from SSR
Dustin Brown96b98dd2019-03-06 12:39:37 -08001319 * @net_dev: pointer to net_device structure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001320 *
1321 * Function called by the OS if there is a timeout during transmission.
1322 * Since HDD simply enqueues the packet and returns control to the OS
1323 * right away, this should never be invoked.
1324 *
1325 * Return: none
1326 */
Dustin Brown96b98dd2019-03-06 12:39:37 -08001327void hdd_tx_timeout(struct net_device *net_dev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001328{
Dustin Brown96b98dd2019-03-06 12:39:37 -08001329 struct osif_vdev_sync *vdev_sync;
1330
1331 if (osif_vdev_sync_op_start(net_dev, &vdev_sync))
1332 return;
1333
1334 __hdd_tx_timeout(net_dev);
1335
1336 osif_vdev_sync_op_stop(vdev_sync);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001337}
1338
1339/**
1340 * hdd_init_tx_rx() - Initialize Tx/Rx module
Jeff Johnson80486862017-10-02 13:21:29 -07001341 * @adapter: pointer to adapter context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001342 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301343 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
1344 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001345 */
Jeff Johnson80486862017-10-02 13:21:29 -07001346QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001347{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301348 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001349
Jeff Johnsond36fa332019-03-18 13:42:25 -07001350 if (!adapter) {
Jeff Johnson80486862017-10-02 13:21:29 -07001351 hdd_err("adapter is NULL");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301352 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301353 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001354 }
1355
1356 return status;
1357}
1358
1359/**
1360 * hdd_deinit_tx_rx() - Deinitialize Tx/Rx module
Jeff Johnson80486862017-10-02 13:21:29 -07001361 * @adapter: pointer to adapter context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001362 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301363 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
1364 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001365 */
Jeff Johnson80486862017-10-02 13:21:29 -07001366QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001367{
Dustin Brownb0b240a2018-07-30 14:16:30 -07001368 QDF_BUG(adapter);
1369 if (!adapter)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301370 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001371
Dustin Brownb0b240a2018-07-30 14:16:30 -07001372 adapter->tx_fn = NULL;
1373
1374 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001375}
1376
Nirav Shah73713f72018-05-17 14:50:41 +05301377#ifdef FEATURE_MONITOR_MODE_SUPPORT
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001378/**
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001379 * hdd_mon_rx_packet_cbk() - Receive callback registered with OL layer.
1380 * @context: [in] pointer to qdf context
1381 * @rxbuf: [in] pointer to rx qdf_nbuf
1382 *
1383 * TL will call this to notify the HDD when one or more packets were
1384 * received for a registered STA.
1385 *
1386 * Return: QDF_STATUS_E_FAILURE if any errors encountered, QDF_STATUS_SUCCESS
1387 * otherwise
1388 */
1389static QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
1390{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001391 struct hdd_adapter *adapter;
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001392 int rxstat;
1393 struct sk_buff *skb;
1394 struct sk_buff *skb_next;
1395 unsigned int cpu_index;
1396
1397 /* Sanity check on inputs */
Jeff Johnsond36fa332019-03-18 13:42:25 -07001398 if ((!context) || (!rxbuf)) {
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001399 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1400 "%s: Null params being passed", __func__);
1401 return QDF_STATUS_E_FAILURE;
1402 }
1403
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001404 adapter = (struct hdd_adapter *)context;
Jeff Johnsond36fa332019-03-18 13:42:25 -07001405 if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001406 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson36e74c42017-09-18 08:15:42 -07001407 "invalid adapter %pK", adapter);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001408 return QDF_STATUS_E_FAILURE;
1409 }
1410
1411 cpu_index = wlan_hdd_get_cpu();
1412
1413 /* walk the chain until all are processed */
1414 skb = (struct sk_buff *) rxbuf;
Jeff Johnsond36fa332019-03-18 13:42:25 -07001415 while (skb) {
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001416 skb_next = skb->next;
1417 skb->dev = adapter->dev;
1418
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001419 ++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001420 ++adapter->stats.rx_packets;
1421 adapter->stats.rx_bytes += skb->len;
1422
1423 /* Remove SKB from internal tracking table before submitting
1424 * it to stack
1425 */
1426 qdf_net_buf_debug_release_skb(skb);
1427
1428 /*
1429		 * If this is not the last packet on the chain,
1430		 * just put it on the backlog queue without scheduling the RX softirq
1431 */
1432 if (skb->next) {
1433 rxstat = netif_rx(skb);
1434 } else {
1435 /*
1436			 * This is the last packet on the chain;
1437			 * schedule the RX softirq
1438 */
1439 rxstat = netif_rx_ni(skb);
1440 }
1441
1442 if (NET_RX_SUCCESS == rxstat)
1443 ++adapter->
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001444 hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001445 else
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001446 ++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001447
1448 skb = skb_next;
1449 }
1450
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001451 return QDF_STATUS_SUCCESS;
1452}
Nirav Shah73713f72018-05-17 14:50:41 +05301453#endif
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001454
Ravi Joshibb8d4512016-08-22 10:14:52 -07001455/**
1456 * hdd_is_mcast_replay() - checks if pkt is multicast replay
1457 * @skb: packet skb
1458 *
1459 * Return: true if replayed multicast pkt, false otherwise
1460 */
1461static bool hdd_is_mcast_replay(struct sk_buff *skb)
1462{
1463 struct ethhdr *eth;
1464
1465 eth = eth_hdr(skb);
1466 if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
1467 if (unlikely(ether_addr_equal(eth->h_source,
1468 skb->dev->dev_addr)))
1469 return true;
1470 }
1471 return false;
1472}
1473
Naveen Rawatf28315c2016-06-29 18:06:02 -07001474/**
Jeff Johnsondcf84ce2017-10-05 09:26:24 -07001475 * hdd_is_arp_local() - check if local or non local arp
1476 * @skb: pointer to sk_buff
1477 *
1478 * Return: true if local arp or false otherwise.
1479 */
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301480static bool hdd_is_arp_local(struct sk_buff *skb)
1481{
1482 struct arphdr *arp;
1483 struct in_ifaddr **ifap = NULL;
1484 struct in_ifaddr *ifa = NULL;
1485 struct in_device *in_dev;
1486 unsigned char *arp_ptr;
1487 __be32 tip;
1488
1489 arp = (struct arphdr *)skb->data;
1490 if (arp->ar_op == htons(ARPOP_REQUEST)) {
1491 in_dev = __in_dev_get_rtnl(skb->dev);
1492 if (in_dev) {
1493 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1494 ifap = &ifa->ifa_next) {
1495 if (!strcmp(skb->dev->name, ifa->ifa_label))
1496 break;
1497 }
1498 }
1499
1500 if (ifa && ifa->ifa_local) {
1501 arp_ptr = (unsigned char *)(arp + 1);
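			/*
			 * The ARP payload after the header is: sender HW
			 * address (addr_len), sender IP (4 bytes), target HW
			 * address (addr_len), then target IP; advance the
			 * pointer to the target IP.
			 */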
1502 arp_ptr += (skb->dev->addr_len + 4 +
1503 skb->dev->addr_len);
1504 memcpy(&tip, arp_ptr, 4);
Poddar, Siddarthb4b74792017-11-06 14:57:35 +05301505 hdd_debug("ARP packet: local IP: %x dest IP: %x",
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301506 ifa->ifa_local, tip);
Rajeev Kumaref3a3362017-05-07 20:11:16 -07001507 if (ifa->ifa_local == tip)
1508 return true;
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301509 }
1510 }
1511
Rajeev Kumaref3a3362017-05-07 20:11:16 -07001512 return false;
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301513}
1514
1515/**
Rajeev Kumaref3a3362017-05-07 20:11:16 -07001516 * hdd_is_rx_wake_lock_needed() - check if wake lock is needed
1517 * @skb: pointer to sk_buff
1518 *
1519 * RX wake lock is needed for:
1520 * 1) Unicast data packet OR
1521 * 2) Local ARP data packet
1522 *
1523 * Return: true if wake lock is needed or false otherwise.
1524 */
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301525static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb)
1526{
1527 if ((skb->pkt_type != PACKET_BROADCAST &&
1528 skb->pkt_type != PACKET_MULTICAST) || hdd_is_arp_local(skb))
1529 return true;
1530
1531 return false;
1532}
1533
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001534#ifdef RECEIVE_OFFLOAD
1535/**
1536 * hdd_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO
1537 * @hdd_ctx: pointer to HDD Station Context
1538 *
1539 * Return: None
1540 */
1541static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
1542{
jitiphil377bcc12018-10-05 19:46:08 +05301543 void *soc;
1544
1545 soc = cds_get_context(QDF_MODULE_ID_SOC);
1546
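	/*
	 * lro_enable ^ gro_enable is zero when both are set or both are
	 * clear; in either case a single Rx offload mode cannot be chosen.
	 */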
1547 if (!(cdp_cfg_get(soc, cfg_dp_lro_enable) ^
1548 cdp_cfg_get(soc, cfg_dp_gro_enable))) {
		if (cdp_cfg_get(soc, cfg_dp_lro_enable) &&
		    cdp_cfg_get(soc, cfg_dp_gro_enable))
			hdd_debug("Can't enable both LRO and GRO, disabling Rx offload");
		else
			hdd_debug("LRO and GRO both are disabled");
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001553 hdd_ctx->ol_enable = 0;
jitiphil377bcc12018-10-05 19:46:08 +05301554 } else if (cdp_cfg_get(soc, cfg_dp_lro_enable)) {
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001555 hdd_debug("Rx offload LRO is enabled");
1556 hdd_ctx->ol_enable = CFG_LRO_ENABLED;
1557 } else {
Arun Kumar Khandavalliafcb0552020-01-20 11:46:36 +05301558 hdd_debug("Rx offload: GRO is enabled");
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001559 hdd_ctx->ol_enable = CFG_GRO_ENABLED;
1560 }
1561}
1562
Rakesh Pillaic73dc112020-07-30 19:44:43 +05301563#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001564/**
Mohit Khanna81418772018-10-30 14:14:46 -07001565 * hdd_gro_rx_bh_disable() - GRO RX/flush function.
1566 * @adapter: pointer to HDD adapter
 * @napi_to_use: napi instance used to pass packets to the stack and do gro flush
1567 * @skb: pointer to sk_buff
1568 *
1569 * Function calls napi_gro_receive for the skb. If the skb indicates that a
1570 * flush needs to be done (set by the lower DP layer), the function also calls
1571 * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
1572 * napi_gro_* calls.
1573 *
1574 * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
1575 * QDF error code.
1576 */
1577static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
1578 struct napi_struct *napi_to_use,
1579 struct sk_buff *skb)
1580{
Manjunathappa Prakash78b6a882019-03-28 19:59:23 -07001581 QDF_STATUS status = QDF_STATUS_SUCCESS;
Jinwei Chen0dc383e2019-08-23 00:43:04 +08001582 struct hdd_context *hdd_ctx = adapter->hdd_ctx;
Mohit Khanna81418772018-10-30 14:14:46 -07001583 gro_result_t gro_res;
Yeshwanth Sriram Guntukaf2f69ba2020-06-26 10:16:53 +05301584 uint32_t rx_aggregation;
1585 uint8_t rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(skb);
1586
1587 rx_aggregation = qdf_atomic_read(&hdd_ctx->dp_agg_param.rx_aggregation);
Mohit Khanna81418772018-10-30 14:14:46 -07001588
1589 skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
1590
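	/*
	 * napi_gro_receive()/napi_gro_flush() must be called with softirqs
	 * disabled, hence the local_bh_disable()/local_bh_enable() pair.
	 */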
1591 local_bh_disable();
1592 gro_res = napi_gro_receive(napi_to_use, skb);
Jinwei Chen0dc383e2019-08-23 00:43:04 +08001593
Yeshwanth Sriram Guntukaf2f69ba2020-06-26 10:16:53 +05301594 if (hdd_get_current_throughput_level(hdd_ctx) == PLD_BUS_WIDTH_IDLE ||
Yeshwanth Sriram Guntukaf3faf8b2020-07-23 19:52:25 +05301595 !rx_aggregation || adapter->gro_disallowed[rx_ctx_id]) {
Jinwei Chenc1dc5c72019-08-26 16:24:46 +08001596 if (gro_res != GRO_DROP && gro_res != GRO_NORMAL) {
1597 adapter->hdd_stats.tx_rx_stats.
1598 rx_gro_low_tput_flush++;
1599 napi_gro_flush(napi_to_use, false);
1600 }
Yeshwanth Sriram Guntukaf2f69ba2020-06-26 10:16:53 +05301601 if (!rx_aggregation)
1602 hdd_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] = 1;
Yeshwanth Sriram Guntukaf3faf8b2020-07-23 19:52:25 +05301603 if (adapter->gro_disallowed[rx_ctx_id])
1604 adapter->gro_flushed[rx_ctx_id] = 1;
Jinwei Chen0dc383e2019-08-23 00:43:04 +08001605 }
Mohit Khanna81418772018-10-30 14:14:46 -07001606 local_bh_enable();
1607
Manjunathappa Prakash78b6a882019-03-28 19:59:23 -07001608 if (gro_res == GRO_DROP)
1609 status = QDF_STATUS_E_GRO_DROP;
Mohit Khanna81418772018-10-30 14:14:46 -07001610
Mohit Khanna81418772018-10-30 14:14:46 -07001611 return status;
1612}
1613
Rakesh Pillaic73dc112020-07-30 19:44:43 +05301614#else /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
1615
1616/**
1617 * hdd_gro_rx_bh_disable() - GRO RX/flush function.
1618 * @adapter: pointer to HDD adapter
 * @napi_to_use: napi instance used to pass packets to the stack and do gro flush
1619 * @skb: pointer to sk_buff
1620 *
1621 * Function calls napi_gro_receive for the skb. If the skb indicates that a
1622 * flush needs to be done (set by the lower DP layer), the function also calls
1623 * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
1624 * napi_gro_* calls.
1625 *
1626 * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
1627 * QDF error code.
1628 */
1629static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
1630 struct napi_struct *napi_to_use,
1631 struct sk_buff *skb)
1632{
1633 QDF_STATUS status = QDF_STATUS_SUCCESS;
1634 struct hdd_context *hdd_ctx = adapter->hdd_ctx;
1635 gro_result_t gro_res;
1636
1637 skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
1638
1639 local_bh_disable();
1640 gro_res = napi_gro_receive(napi_to_use, skb);
1641
1642 if (hdd_get_current_throughput_level(hdd_ctx) == PLD_BUS_WIDTH_IDLE) {
1643 if (gro_res != GRO_DROP && gro_res != GRO_NORMAL) {
1644 adapter->hdd_stats.tx_rx_stats.rx_gro_low_tput_flush++;
1645 napi_gro_flush(napi_to_use, false);
1646 }
1647 }
1648 local_bh_enable();
1649
1650 if (gro_res == GRO_DROP)
1651 status = QDF_STATUS_E_GRO_DROP;
1652
1653 return status;
1654}
1655#endif /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
1656
Mohit Khanna81418772018-10-30 14:14:46 -07001657/**
1658 * hdd_gro_rx_dp_thread() - Handle Rx processing via GRO for DP thread
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001659 * @adapter: pointer to adapter context
1660 * @skb: pointer to sk_buff
1661 *
1662 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
1663 */
Mohit Khanna81418772018-10-30 14:14:46 -07001664static
1665QDF_STATUS hdd_gro_rx_dp_thread(struct hdd_adapter *adapter,
1666 struct sk_buff *skb)
1667{
1668 struct napi_struct *napi_to_use = NULL;
1669 QDF_STATUS status = QDF_STATUS_E_FAILURE;
Mohit Khanna81418772018-10-30 14:14:46 -07001670
1671 if (!adapter->hdd_ctx->enable_dp_rx_threads) {
1672 hdd_dp_err_rl("gro not supported without DP RX thread!");
Mohit Khanna81418772018-10-30 14:14:46 -07001673 return status;
1674 }
1675
1676 napi_to_use =
1677 dp_rx_get_napi_context(cds_get_context(QDF_MODULE_ID_SOC),
1678 QDF_NBUF_CB_RX_CTX_ID(skb));
1679
1680 if (!napi_to_use) {
1681 hdd_dp_err_rl("no napi to use for GRO!");
Mohit Khanna81418772018-10-30 14:14:46 -07001682 return status;
1683 }
1684
Mohit Khanna81418772018-10-30 14:14:46 -07001685 status = hdd_gro_rx_bh_disable(adapter, napi_to_use, skb);
1686
1687 return status;
1688}
1689
1690/**
1691 * hdd_gro_rx_legacy() - Handle Rx processing via GRO for ihelium based targets
1692 * @adapter: pointer to adapter context
1693 * @skb: pointer to sk_buff
1694 *
1695 * Supports GRO for only station mode
1696 *
1697 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
1698 */
1699static
1700QDF_STATUS hdd_gro_rx_legacy(struct hdd_adapter *adapter, struct sk_buff *skb)
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001701{
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001702 struct qca_napi_info *qca_napii;
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001703 struct qca_napi_data *napid;
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001704 struct napi_struct *napi_to_use;
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001705 QDF_STATUS status = QDF_STATUS_E_FAILURE;
Mohit Khanna81418772018-10-30 14:14:46 -07001706 struct hdd_context *hdd_ctx = adapter->hdd_ctx;
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001707
1708	/* Like LRO, GRO is only enabled for STA mode today */
1709 if (QDF_STA_MODE != adapter->device_mode)
1710 return QDF_STATUS_E_NOSUPPORT;
1711
Mohit Khanna81418772018-10-30 14:14:46 -07001712 if (qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_low_tput) ||
1713 qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_concurrency))
1714 return QDF_STATUS_E_NOSUPPORT;
1715
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001716 napid = hdd_napi_get_all();
Jeff Johnsond36fa332019-03-18 13:42:25 -07001717 if (unlikely(!napid))
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001718 goto out;
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001719
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001720 qca_napii = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(skb), napid);
Jeff Johnsond36fa332019-03-18 13:42:25 -07001721 if (unlikely(!qca_napii))
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001722 goto out;
1723
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001724 /*
1725	 * As we break context in Rxthread mode, there is an rx_thread NAPI
1726	 * corresponding to each hif_napi.
1727 */
1728 if (adapter->hdd_ctx->enable_rxthread)
1729 napi_to_use = &qca_napii->rx_thread_napi;
1730 else
1731 napi_to_use = &qca_napii->napi;
1732
Mohit Khanna81418772018-10-30 14:14:46 -07001733 status = hdd_gro_rx_bh_disable(adapter, napi_to_use, skb);
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001734out:
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001735
1736 return status;
1737}
1738
1739/**
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001740 * hdd_rxthread_napi_gro_flush() - GRO flush callback for NAPI+Rx_Thread Rx mode
1741 * @data: hif NAPI context
1742 *
1743 * Return: none
1744 */
1745static void hdd_rxthread_napi_gro_flush(void *data)
1746{
1747 struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
1748
1749 local_bh_disable();
1750 /*
1751	 * As we break context in Rxthread mode, there is an rx_thread NAPI
1752	 * corresponding to each hif_napi.
1753 */
1754 napi_gro_flush(&qca_napii->rx_thread_napi, false);
1755 local_bh_enable();
1756}
1757
1758/**
1759 * hdd_hif_napi_gro_flush() - GRO flush callback for NAPI Rx mode
1760 * @data: hif NAPI context
1761 *
1762 * Return: none
1763 */
1764static void hdd_hif_napi_gro_flush(void *data)
1765{
1766 struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
1767
1768 local_bh_disable();
1769 napi_gro_flush(&qca_napii->napi, false);
1770 local_bh_enable();
1771}
1772
1773#ifdef FEATURE_LRO
1774/**
1775 * hdd_qdf_lro_flush() - LRO flush wrapper
1776 * @data: hif NAPI context
1777 *
1778 * Return: none
1779 */
1780static void hdd_qdf_lro_flush(void *data)
1781{
1782 struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
1783 qdf_lro_ctx_t qdf_lro_ctx = qca_napii->lro_ctx;
1784
1785 qdf_lro_flush(qdf_lro_ctx);
1786}
1787#else
1788static void hdd_qdf_lro_flush(void *data)
1789{
1790}
1791#endif
1792
1793/**
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001794 * hdd_register_rx_ol_cb() - Register LRO/GRO rx processing callbacks
Mohit Khanna81418772018-10-30 14:14:46 -07001795 * @hdd_ctx: pointer to hdd_ctx
1796 * @lithium_based_target: whether it is a lithium arch based target or not
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001797 *
1798 * Return: none
1799 */
Mohit Khanna81418772018-10-30 14:14:46 -07001800static void hdd_register_rx_ol_cb(struct hdd_context *hdd_ctx,
1801 bool lithium_based_target)
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001802{
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001803 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001804
Amar Singhalcc5a4ec2018-09-04 12:27:51 -07001805 if (!hdd_ctx) {
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001806 hdd_err("HDD context is NULL");
Amar Singhalcc5a4ec2018-09-04 12:27:51 -07001807 return;
1808 }
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001809
Manjunathappa Prakashbfd12762018-04-29 22:44:52 -07001810 hdd_ctx->en_tcp_delack_no_lro = 0;
1811
Alok Kumar3dd311d2018-08-17 15:12:36 +05301812 if (!hdd_is_lro_enabled(hdd_ctx)) {
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07001813 cdp_register_rx_offld_flush_cb(soc, hdd_qdf_lro_flush);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001814 hdd_ctx->receive_offload_cb = hdd_lro_rx;
1815 hdd_debug("LRO is enabled");
1816 } else if (hdd_ctx->ol_enable == CFG_GRO_ENABLED) {
Yeshwanth Sriram Guntukaf2f69ba2020-06-26 10:16:53 +05301817 qdf_atomic_set(&hdd_ctx->dp_agg_param.rx_aggregation, 1);
Mohit Khanna81418772018-10-30 14:14:46 -07001818 if (lithium_based_target) {
1819 /* no flush registration needed, it happens in DP thread */
1820 hdd_ctx->receive_offload_cb = hdd_gro_rx_dp_thread;
1821 } else {
1822			/* ihelium based targets */
1823 if (hdd_ctx->enable_rxthread)
1824 cdp_register_rx_offld_flush_cb(soc,
1825 hdd_rxthread_napi_gro_flush);
1826 else
1827 cdp_register_rx_offld_flush_cb(soc,
1828 hdd_hif_napi_gro_flush);
1829 hdd_ctx->receive_offload_cb = hdd_gro_rx_legacy;
1830 }
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001831 hdd_debug("GRO is enabled");
Manjunathappa Prakashbfd12762018-04-29 22:44:52 -07001832 } else if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
1833 hdd_ctx->en_tcp_delack_no_lro = 1;
Mohit Khanna81418772018-10-30 14:14:46 -07001834 hdd_debug("TCP Del ACK is enabled");
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001835 }
1836}
1837
Mohit Khanna81418772018-10-30 14:14:46 -07001838/**
1839 * hdd_rx_ol_send_config() - Send RX offload configuration to FW
1840 * @hdd_ctx: pointer to hdd_ctx
1841 *
1842 * This function is only used for non-lithium targets. Lithium-based targets
1843 * send the LRO config to FW during vdev attach, implemented in the common DP layer.
1844 *
1845 * Return: 0 on success, non zero on failure
1846 */
1847static int hdd_rx_ol_send_config(struct hdd_context *hdd_ctx)
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001848{
1849 struct cdp_lro_hash_config lro_config = {0};
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001850 /*
1851 * This will enable flow steering and Toeplitz hash
1852 * So enable it for LRO or GRO processing.
1853 */
Mohit Khanna81418772018-10-30 14:14:46 -07001854 if (cfg_get(hdd_ctx->psoc, CFG_DP_GRO) ||
1855 cfg_get(hdd_ctx->psoc, CFG_DP_LRO)) {
1856 lro_config.lro_enable = 1;
1857 lro_config.tcp_flag = TCPHDR_ACK;
1858 lro_config.tcp_flag_mask = TCPHDR_FIN | TCPHDR_SYN |
1859 TCPHDR_RST | TCPHDR_ACK |
1860 TCPHDR_URG | TCPHDR_ECE |
1861 TCPHDR_CWR;
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001862 }
1863
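	/* Randomize the Toeplitz hash seeds the FW uses for Rx flow steering */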
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001864 get_random_bytes(lro_config.toeplitz_hash_ipv4,
1865 (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
1866 LRO_IPV4_SEED_ARR_SZ));
1867
1868 get_random_bytes(lro_config.toeplitz_hash_ipv6,
1869 (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
1870 LRO_IPV6_SEED_ARR_SZ));
1871
Mohit Khanna81418772018-10-30 14:14:46 -07001872 if (wma_lro_init(&lro_config))
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001873 return -EAGAIN;
Mohit Khanna81418772018-10-30 14:14:46 -07001874 else
Arun Kumar Khandavalliafcb0552020-01-20 11:46:36 +05301875 hdd_debug("LRO Config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
1876 lro_config.lro_enable, lro_config.tcp_flag,
1877 lro_config.tcp_flag_mask);
Mohit Khanna81418772018-10-30 14:14:46 -07001878
1879 return 0;
1880}
1881
1882int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
1883{
1884 int ret = 0;
1885 bool lithium_based_target = false;
1886
1887 if (hdd_ctx->target_type == TARGET_TYPE_QCA6290 ||
Manjunathappa Prakash458f6fe2019-05-13 18:33:01 -07001888 hdd_ctx->target_type == TARGET_TYPE_QCA6390 ||
1889 hdd_ctx->target_type == TARGET_TYPE_QCA6490)
Mohit Khanna81418772018-10-30 14:14:46 -07001890 lithium_based_target = true;
1891
1892 hdd_resolve_rx_ol_mode(hdd_ctx);
1893 hdd_register_rx_ol_cb(hdd_ctx, lithium_based_target);
1894
1895 if (!lithium_based_target) {
1896 ret = hdd_rx_ol_send_config(hdd_ctx);
1897 if (ret) {
1898 hdd_ctx->ol_enable = 0;
1899			hdd_err("Failed to send LRO/GRO configuration! %d", ret);
1900 return ret;
1901 }
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001902 }
1903
1904 return 0;
1905}
1906
1907void hdd_disable_rx_ol_in_concurrency(bool disable)
1908{
1909 struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1910
1911 if (!hdd_ctx) {
1912 hdd_err("hdd_ctx is NULL");
1913 return;
1914 }
1915
1916 if (disable) {
Manjunathappa Prakashbfd12762018-04-29 22:44:52 -07001917 if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001918 struct wlan_rx_tp_data rx_tp_data;
1919
1920 hdd_info("Enable TCP delack as LRO disabled in concurrency");
1921 rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
1922 rx_tp_data.level = GET_CUR_RX_LVL(hdd_ctx);
Alok Kumar2fad6442018-11-08 19:19:28 +05301923 wlan_hdd_update_tcp_rx_param(hdd_ctx, &rx_tp_data);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001924 hdd_ctx->en_tcp_delack_no_lro = 1;
1925 }
Mohit Khanna81418772018-10-30 14:14:46 -07001926 qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_concurrency, 1);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001927 } else {
Manjunathappa Prakashbfd12762018-04-29 22:44:52 -07001928 if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001929 hdd_info("Disable TCP delack as LRO is enabled");
1930 hdd_ctx->en_tcp_delack_no_lro = 0;
1931 hdd_reset_tcp_delack(hdd_ctx);
1932 }
Mohit Khanna81418772018-10-30 14:14:46 -07001933 qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_concurrency, 0);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001934 }
1935}
1936
1937void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
1938{
1939 if (disable)
Mohit Khanna81418772018-10-30 14:14:46 -07001940 qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_low_tput, 1);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001941 else
Mohit Khanna81418772018-10-30 14:14:46 -07001942 qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_low_tput, 0);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001943}
1944
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001945#else /* RECEIVE_OFFLOAD */
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07001946int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
1947{
1948 hdd_err("Rx_OL, LRO/GRO not supported");
1949 return -EPERM;
1950}
1951
1952void hdd_disable_rx_ol_in_concurrency(bool disable)
1953{
1954}
1955
1956void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
1957{
1958}
1959#endif /* RECEIVE_OFFLOAD */
1960
Yu Wang66a250b2017-07-19 11:46:40 +08001961#ifdef WLAN_FEATURE_TSF_PLUS
1962static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
1963 qdf_nbuf_t netbuf,
1964 uint64_t target_time)
1965{
yuanl2746f072018-09-21 19:19:16 +08001966 if (!hdd_tsf_is_rx_set(hdd_ctx))
Yu Wang66a250b2017-07-19 11:46:40 +08001967 return;
1968
1969 hdd_rx_timestamp(netbuf, target_time);
1970}
1971#else
1972static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
1973 qdf_nbuf_t netbuf,
1974 uint64_t target_time)
1975{
1976}
1977#endif
1978
Mohit Khannaf0620ce2019-07-28 21:31:05 -07001979QDF_STATUS hdd_rx_thread_gro_flush_ind_cbk(void *adapter, int rx_ctx_id)
1980{
Jinwei Chenb681a482019-08-14 15:24:06 +08001981 struct hdd_adapter *hdd_adapter = adapter;
1982
1983 if (qdf_unlikely((!hdd_adapter) || (!hdd_adapter->hdd_ctx))) {
Mohit Khannaf0620ce2019-07-28 21:31:05 -07001984 hdd_err("Null params being passed");
1985 return QDF_STATUS_E_FAILURE;
1986 }
Jinwei Chenb681a482019-08-14 15:24:06 +08001987
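	/*
	 * When low-throughput GRO flushing is active the flush happens
	 * inline in the rx path, so the explicit flush indication is skipped.
	 */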
Jinwei Chen0dc383e2019-08-23 00:43:04 +08001988 if (hdd_is_low_tput_gro_enable(hdd_adapter->hdd_ctx)) {
Jinwei Chenb681a482019-08-14 15:24:06 +08001989 hdd_adapter->hdd_stats.tx_rx_stats.rx_gro_flush_skip++;
1990 return QDF_STATUS_SUCCESS;
1991 }
1992
Mohit Khannaf0620ce2019-07-28 21:31:05 -07001993 return dp_rx_gro_flush_ind(cds_get_context(QDF_MODULE_ID_SOC),
1994 rx_ctx_id);
1995}
1996
Mohit Khanna70322002018-05-15 19:21:32 -07001997QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter,
Srinivas Girigowdaa19eafd2018-09-07 15:28:21 -07001998 qdf_nbuf_t nbuf_list)
1999{
Rakshith Suresh Patkar68ed4952019-08-29 15:40:18 +05302000 struct hdd_adapter *hdd_adapter;
2001 uint8_t vdev_id;
2002 qdf_nbuf_t head_ptr;
2003
Mohit Khannaf0620ce2019-07-28 21:31:05 -07002004 if (qdf_unlikely(!adapter || !nbuf_list)) {
Mohit Khanna70322002018-05-15 19:21:32 -07002005 hdd_err("Null params being passed");
2006 return QDF_STATUS_E_FAILURE;
2007 }
Rakshith Suresh Patkar68ed4952019-08-29 15:40:18 +05302008
2009 hdd_adapter = (struct hdd_adapter *)adapter;
Jinwei Chencdc13112020-05-28 16:57:27 +08002010 if (hdd_validate_adapter(hdd_adapter)) {
2011		hdd_err_rl("adapter validation failed");
Rakshith Suresh Patkar68ed4952019-08-29 15:40:18 +05302012 return QDF_STATUS_E_FAILURE;
Jinwei Chencdc13112020-05-28 16:57:27 +08002013 }
Rakshith Suresh Patkar68ed4952019-08-29 15:40:18 +05302014
2015 vdev_id = hdd_adapter->vdev_id;
2016 head_ptr = nbuf_list;
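	/*
	 * Stamp every nbuf in the chain with the vdev id so the DP rx
	 * thread can associate each packet with the correct vdev.
	 */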
2017 while (head_ptr) {
2018 qdf_nbuf_cb_update_vdev_id(head_ptr, vdev_id);
2019 head_ptr = qdf_nbuf_next(head_ptr);
2020 }
2021
Mohit Khanna70322002018-05-15 19:21:32 -07002022 return dp_rx_enqueue_pkt(cds_get_context(QDF_MODULE_ID_SOC), nbuf_list);
2023}
2024
Ajit Pal Singhf9e06bc2019-11-13 18:09:29 +05302025#ifdef CONFIG_HL_SUPPORT
2026QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
2027 struct sk_buff *skb)
2028{
2029 struct hdd_context *hdd_ctx = adapter->hdd_ctx;
2030 int status = QDF_STATUS_E_FAILURE;
2031 int netif_status;
2032
2033 adapter->hdd_stats.tx_rx_stats.rx_non_aggregated++;
2034 hdd_ctx->no_rx_offload_pkt_cnt++;
2035 netif_status = netif_rx_ni(skb);
2036
2037 if (netif_status == NET_RX_SUCCESS)
2038 status = QDF_STATUS_SUCCESS;
2039
2040 return status;
2041}
2042#else
Yeshwanth Sriram Guntukaf3faf8b2020-07-23 19:52:25 +05302043
2044#if defined(WLAN_SUPPORT_RX_FISA)
2045/**
2046 * hdd_set_fisa_disallowed_for_vdev() - Set fisa disallowed bit for a vdev
2047 * @soc: DP soc handle
2048 * @vdev_id: Vdev id
2049 * @rx_ctx_id: rx context id
2050 * @val: Enable or disable
2051 *
2052 * The function sets the fisa disallowed flag for a given vdev
2053 *
2054 * Return: None
2055 */
2056static inline
2057void hdd_set_fisa_disallowed_for_vdev(ol_txrx_soc_handle soc, uint8_t vdev_id,
2058 uint8_t rx_ctx_id, uint8_t val)
2059{
2060 dp_set_fisa_disallowed_for_vdev(soc, vdev_id, rx_ctx_id, val);
2061}
2062#else
2063static inline
2064void hdd_set_fisa_disallowed_for_vdev(ol_txrx_soc_handle soc, uint8_t vdev_id,
2065 uint8_t rx_ctx_id, uint8_t val)
2066{
2067}
2068#endif
2069
Rakesh Pillaic73dc112020-07-30 19:44:43 +05302070#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
Yeshwanth Sriram Guntukaf3faf8b2020-07-23 19:52:25 +05302071/**
2072 * hdd_rx_check_qdisc_for_adapter() - Check if any ingress qdisc is configured
2073 * for given adapter
2074 * @adapter: pointer to HDD adapter context
2075 * @rx_ctx_id: Rx context id
2076 *
2077 * The function checks if ingress qdisc is registered for a given
2078 * net device.
2079 *
2080 * Return: None
2081 */
2082static void
2083hdd_rx_check_qdisc_for_adapter(struct hdd_adapter *adapter, uint8_t rx_ctx_id)
2084{
2085 ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);
2086 struct netdev_queue *ingress_q;
2087 struct Qdisc *ingress_qdisc;
2088 bool is_qdisc_ingress = false;
2089
2090 /*
2091 * This additional ingress_queue NULL check is to avoid
2092 * doing RCU lock/unlock in the common scenario where
2093 * ingress_queue is not configured by default
2094 */
2095 if (qdf_likely(!adapter->dev->ingress_queue))
2096 goto reset_wl;
2097
2098 rcu_read_lock();
2099 ingress_q = rcu_dereference(adapter->dev->ingress_queue);
2100
2101 if (qdf_unlikely(!ingress_q))
2102 goto reset;
2103
2104 ingress_qdisc = rcu_dereference(ingress_q->qdisc);
2105 if (!ingress_qdisc)
2106 goto reset;
2107
2108 is_qdisc_ingress = qdf_str_eq(ingress_qdisc->ops->id, "ingress");
2109 if (!is_qdisc_ingress)
2110 goto reset;
2111
2112 rcu_read_unlock();
2113
2114 if (adapter->gro_disallowed[rx_ctx_id])
2115 return;
2116
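	/*
	 * An ingress qdisc implies per-packet traffic control on this
	 * device, so GRO (and FISA) aggregation is disallowed on this rx
	 * context while it is configured.
	 */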
2117	hdd_debug("ingress qdisc configured, disabling GRO");
2118 adapter->gro_disallowed[rx_ctx_id] = 1;
2119 hdd_set_fisa_disallowed_for_vdev(soc, adapter->vdev_id, rx_ctx_id, 1);
2120
2121 return;
2122
2123reset:
2124 rcu_read_unlock();
2125
2126reset_wl:
2127 if (adapter->gro_disallowed[rx_ctx_id]) {
2128		hdd_debug("ingress qdisc removed, enabling GRO");
2129 hdd_set_fisa_disallowed_for_vdev(soc, adapter->vdev_id,
2130 rx_ctx_id, 0);
2131 adapter->gro_disallowed[rx_ctx_id] = 0;
2132 adapter->gro_flushed[rx_ctx_id] = 0;
2133 }
2134}
2135
Mohit Khanna81418772018-10-30 14:14:46 -07002136QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
2137 struct sk_buff *skb)
2138{
2139 struct hdd_context *hdd_ctx = adapter->hdd_ctx;
2140 int status = QDF_STATUS_E_FAILURE;
2141 int netif_status;
2142 bool skb_receive_offload_ok = false;
Yeshwanth Sriram Guntukaf2f69ba2020-06-26 10:16:53 +05302143 uint8_t rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(skb);
Mohit Khanna81418772018-10-30 14:14:46 -07002144
Yeshwanth Sriram Guntukaf3faf8b2020-07-23 19:52:25 +05302145 hdd_rx_check_qdisc_for_adapter(adapter, rx_ctx_id);
2146
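	/*
	 * Only TCP frames that were not cached before peer registration are
	 * eligible for GRO/LRO.
	 */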
Mohit Khanna81418772018-10-30 14:14:46 -07002147 if (QDF_NBUF_CB_RX_TCP_PROTO(skb) &&
2148 !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
2149 skb_receive_offload_ok = true;
2150
Yeshwanth Sriram Guntukaf2f69ba2020-06-26 10:16:53 +05302151 if (skb_receive_offload_ok && hdd_ctx->receive_offload_cb &&
Yeshwanth Sriram Guntukaf3faf8b2020-07-23 19:52:25 +05302152 !hdd_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] &&
2153 !adapter->gro_flushed[rx_ctx_id]) {
Mohit Khanna81418772018-10-30 14:14:46 -07002154 status = hdd_ctx->receive_offload_cb(adapter, skb);
2155
Manjunathappa Prakash78b6a882019-03-28 19:59:23 -07002156 if (QDF_IS_STATUS_SUCCESS(status)) {
2157 adapter->hdd_stats.tx_rx_stats.rx_aggregated++;
2158 return status;
2159 }
2160
2161 if (status == QDF_STATUS_E_GRO_DROP) {
2162 adapter->hdd_stats.tx_rx_stats.rx_gro_dropped++;
2163 return status;
2164 }
Mohit Khanna81418772018-10-30 14:14:46 -07002165 }
2166
Yeshwanth Sriram Guntukaf2f69ba2020-06-26 10:16:53 +05302167 /*
2168 * The below case handles the scenario when rx_aggregation is
2169 * re-enabled dynamically, in which case gro_force_flush needs
2170 * to be reset to 0 to allow GRO.
2171 */
2172 if (qdf_atomic_read(&hdd_ctx->dp_agg_param.rx_aggregation) &&
2173 hdd_ctx->dp_agg_param.gro_force_flush[rx_ctx_id])
2174 hdd_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] = 0;
2175
Mohit Khanna81418772018-10-30 14:14:46 -07002176 adapter->hdd_stats.tx_rx_stats.rx_non_aggregated++;
2177
2178 /* Account for GRO/LRO ineligible packets, mostly UDP */
2179 hdd_ctx->no_rx_offload_pkt_cnt++;
2180
2181 if (qdf_likely(hdd_ctx->enable_dp_rx_threads ||
2182 hdd_ctx->enable_rxthread)) {
2183 local_bh_disable();
2184 netif_status = netif_receive_skb(skb);
2185 local_bh_enable();
2186 } else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))) {
2187 /*
2188		 * Frames received before the peer is registered are passed via
2189		 * netif_rx_ni() to avoid contention with the NAPI softirq.
2190 * Refer fix:
2191 * qcacld-3.0: Do netif_rx_ni() for frames received before
2192 * peer assoc
2193 */
2194 netif_status = netif_rx_ni(skb);
2195 } else { /* NAPI Context */
2196 netif_status = netif_receive_skb(skb);
2197 }
2198
2199 if (netif_status == NET_RX_SUCCESS)
2200 status = QDF_STATUS_SUCCESS;
2201
2202 return status;
2203}
Rakesh Pillaic73dc112020-07-30 19:44:43 +05302204
2205#else /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
2206
2207QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
2208 struct sk_buff *skb)
2209{
2210 struct hdd_context *hdd_ctx = adapter->hdd_ctx;
2211 int status = QDF_STATUS_E_FAILURE;
2212 int netif_status;
2213 bool skb_receive_offload_ok = false;
2214
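	/*
	 * Only TCP frames that were not cached before peer registration are
	 * eligible for GRO/LRO.
	 */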
2215 if (QDF_NBUF_CB_RX_TCP_PROTO(skb) &&
2216 !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
2217 skb_receive_offload_ok = true;
2218
2219 if (skb_receive_offload_ok && hdd_ctx->receive_offload_cb) {
2220 status = hdd_ctx->receive_offload_cb(adapter, skb);
2221
2222 if (QDF_IS_STATUS_SUCCESS(status)) {
2223 adapter->hdd_stats.tx_rx_stats.rx_aggregated++;
2224 return status;
2225 }
2226
2227 if (status == QDF_STATUS_E_GRO_DROP) {
2228 adapter->hdd_stats.tx_rx_stats.rx_gro_dropped++;
2229 return status;
2230 }
2231 }
2232
2233 adapter->hdd_stats.tx_rx_stats.rx_non_aggregated++;
2234
2235 /* Account for GRO/LRO ineligible packets, mostly UDP */
2236 hdd_ctx->no_rx_offload_pkt_cnt++;
2237
2238 if (qdf_likely(hdd_ctx->enable_dp_rx_threads ||
2239 hdd_ctx->enable_rxthread)) {
2240 local_bh_disable();
2241 netif_status = netif_receive_skb(skb);
2242 local_bh_enable();
2243 } else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))) {
2244 /*
2245		 * Frames received before the peer is registered are passed via
2246		 * netif_rx_ni() to avoid contention with the NAPI softirq.
2247 * Refer fix:
2248 * qcacld-3.0: Do netif_rx_ni() for frames received before
2249 * peer assoc
2250 */
2251 netif_status = netif_rx_ni(skb);
2252 } else { /* NAPI Context */
2253 netif_status = netif_receive_skb(skb);
2254 }
2255
2256 if (netif_status == NET_RX_SUCCESS)
2257 status = QDF_STATUS_SUCCESS;
2258
2259 return status;
2260}
2261#endif /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
Ajit Pal Singhf9e06bc2019-11-13 18:09:29 +05302262#endif
Mohit Khanna81418772018-10-30 14:14:46 -07002263
Yeshwanth Sriram Guntuka7f445f42019-01-30 17:01:35 +05302264#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
2265static bool hdd_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
2266{
2267 return false;
2268}
2269#else
2270static bool hdd_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
2271{
2272 return cfg80211_is_gratuitous_arp_unsolicited_na(skb);
2273}
2274#endif
2275
Rakesh Pillai246f1df2019-10-24 06:40:20 +05302276QDF_STATUS hdd_rx_flush_packet_cbk(void *adapter_context, uint8_t vdev_id)
2277{
Alan Chendd4e7e32019-11-12 12:07:02 -08002278 struct hdd_adapter *adapter;
2279 struct hdd_context *hdd_ctx;
Rakesh Pillai246f1df2019-10-24 06:40:20 +05302280 ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);
2281
Rakesh Pillaic6bbd322020-06-04 11:41:54 +05302282 if (qdf_unlikely(!soc))
2283 return QDF_STATUS_E_FAILURE;
2284
Alan Chendd4e7e32019-11-12 12:07:02 -08002285 hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
Rakesh Pillai246f1df2019-10-24 06:40:20 +05302286 if (unlikely(!hdd_ctx)) {
2287 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
2288 "%s: HDD context is Null", __func__);
2289 return QDF_STATUS_E_FAILURE;
2290 }
2291
Alan Chendd4e7e32019-11-12 12:07:02 -08002292 adapter = hdd_adapter_get_by_reference(hdd_ctx, adapter_context);
2293 if (!adapter) {
2294 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
2295 "%s: Adapter reference is Null", __func__);
2296 return QDF_STATUS_E_FAILURE;
2297 }
2298
Jinwei Chen4c7f6e72020-05-07 14:35:41 +08002299 /* do fisa flush for this vdev */
Jinwei Chenba9fe9f2020-05-16 11:24:53 +08002300 if (hdd_ctx->config->fisa_enable)
2301 hdd_rx_fisa_flush_by_vdev_id(soc, vdev_id);
Jinwei Chen4c7f6e72020-05-07 14:35:41 +08002302
Rakesh Pillai246f1df2019-10-24 06:40:20 +05302303 if (hdd_ctx->enable_dp_rx_threads)
2304 dp_txrx_flush_pkts_by_vdev_id(soc, vdev_id);
2305
Alan Chendd4e7e32019-11-12 12:07:02 -08002306 hdd_adapter_put(adapter);
2307
Rakesh Pillai246f1df2019-10-24 06:40:20 +05302308 return QDF_STATUS_SUCCESS;
2309}
2310
Mohit Khanna06cce792020-01-09 05:21:53 -08002311#if defined(WLAN_SUPPORT_RX_FISA)
2312QDF_STATUS hdd_rx_fisa_cbk(void *dp_soc, void *dp_vdev, qdf_nbuf_t nbuf_list)
2313{
2314 return dp_fisa_rx((struct dp_soc *)dp_soc, (struct dp_vdev *)dp_vdev,
2315 nbuf_list);
2316}
2317
Jinwei Chen4c7f6e72020-05-07 14:35:41 +08002318QDF_STATUS hdd_rx_fisa_flush_by_ctx_id(void *dp_soc, int ring_num)
Mohit Khanna06cce792020-01-09 05:21:53 -08002319{
Jinwei Chen4c7f6e72020-05-07 14:35:41 +08002320 return dp_rx_fisa_flush_by_ctx_id((struct dp_soc *)dp_soc, ring_num);
2321}
2322
2323QDF_STATUS hdd_rx_fisa_flush_by_vdev_id(void *dp_soc, uint8_t vdev_id)
2324{
2325 return dp_rx_fisa_flush_by_vdev_id((struct dp_soc *)dp_soc, vdev_id);
Mohit Khanna06cce792020-01-09 05:21:53 -08002326}
2327#endif
2328
Mohit Khanna70322002018-05-15 19:21:32 -07002329QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
2330 qdf_nbuf_t rxBuf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002331{
Jeff Johnson80486862017-10-02 13:21:29 -07002332 struct hdd_adapter *adapter = NULL;
Jeff Johnsoncc011972017-09-03 09:26:36 -07002333 struct hdd_context *hdd_ctx = NULL;
Mohit Khanna81418772018-10-30 14:14:46 -07002334 QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002335 struct sk_buff *skb = NULL;
Dhanashri Atrecefa8802017-02-02 16:17:14 -08002336 struct sk_buff *next = NULL;
Jeff Johnsond377dce2017-10-04 10:32:42 -07002337 struct hdd_station_ctx *sta_ctx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002338 unsigned int cpu_index;
hangtiana7938f82019-01-07 16:35:49 +08002339 struct qdf_mac_addr *mac_addr, *dest_mac_addr;
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05302340 bool wake_lock = false;
Poddar, Siddarth31797fa2018-01-22 17:24:15 +05302341 uint8_t pkt_type = 0;
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +05302342 bool track_arp = false;
Bala Venkateshf2867902019-03-08 15:01:23 +05302343 struct wlan_objmgr_vdev *vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002344
2345 /* Sanity check on inputs */
Mohit Khanna70322002018-05-15 19:21:32 -07002346 if (unlikely((!adapter_context) || (!rxBuf))) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05302347 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002348 "%s: Null params being passed", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302349 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002350 }
2351
Mohit Khanna70322002018-05-15 19:21:32 -07002352 adapter = (struct hdd_adapter *)adapter_context;
Jeff Johnson80486862017-10-02 13:21:29 -07002353 if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
Srinivas Girigowda028c4482017-03-09 18:52:02 -08002354 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002355 "Magic cookie(%x) for adapter sanity verification is invalid",
Jeff Johnson80486862017-10-02 13:21:29 -07002356 adapter->magic);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302357 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002358 }
2359
Jeff Johnson80486862017-10-02 13:21:29 -07002360 hdd_ctx = WLAN_HDD_GET_CTX(adapter);
Jeff Johnsond36fa332019-03-18 13:42:25 -07002361 if (unlikely(!hdd_ctx)) {
Dhanashri Atre182b0272016-02-17 15:35:07 -08002362 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
2363 "%s: HDD context is Null", __func__);
2364 return QDF_STATUS_E_FAILURE;
2365 }
2366
2367 cpu_index = wlan_hdd_get_cpu();
2368
Dhanashri Atrecefa8802017-02-02 16:17:14 -08002369 next = (struct sk_buff *)rxBuf;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002370
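	/* Walk the chained rx nbufs, unlinking each skb before delivery */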
Dhanashri Atrecefa8802017-02-02 16:17:14 -08002371 while (next) {
2372 skb = next;
2373 next = skb->next;
Dhanashri Atre63d98022017-01-24 18:22:09 -08002374 skb->next = NULL;
Dhanashri Atrecefa8802017-02-02 16:17:14 -08002375
Yeshwanth Sriram Guntukab31170f2020-04-02 16:16:47 +05302376 if (qdf_nbuf_is_ipv4_arp_pkt(skb)) {
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +05302377 if (qdf_nbuf_data_is_arp_rsp(skb) &&
Alok Kumarb94a2e72019-03-11 19:47:15 +05302378 (adapter->track_arp_ip ==
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +05302379 qdf_nbuf_get_arp_src_ip(skb))) {
2380 ++adapter->hdd_stats.hdd_arp_stats.
2381 rx_arp_rsp_count;
2382 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
Jingxiang Ge13b87052019-09-03 15:58:49 +08002383 QDF_TRACE_LEVEL_DEBUG,
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +05302384 "%s: ARP packet received",
2385 __func__);
2386 track_arp = true;
2387 }
2388 }
Poddar, Siddarth31797fa2018-01-22 17:24:15 +05302389 /* track connectivity stats */
2390 if (adapter->pkt_type_bitmap)
2391 hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
2392 PKT_TYPE_RSP, &pkt_type);
Dhanashri Atrecefa8802017-02-02 16:17:14 -08002393
Jeff Johnsond377dce2017-10-04 10:32:42 -07002394 sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
Jeff Johnsonac5170c2019-02-27 10:55:24 -08002395 if ((sta_ctx->conn_info.proxy_arp_service) &&
Yeshwanth Sriram Guntuka7f445f42019-01-30 17:01:35 +05302396 hdd_is_gratuitous_arp_unsolicited_na(skb)) {
Manjunathappa Prakashf39d2372019-02-25 18:18:57 -08002397 qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
2398 rx_usolict_arp_n_mcast_drp);
2399 /* Remove SKB from internal tracking table before
2400 * submitting it to stack.
Dhanashri Atre63d98022017-01-24 18:22:09 -08002401 */
2402 qdf_nbuf_free(skb);
Dhanashri Atrecefa8802017-02-02 16:17:14 -08002403 continue;
Dhanashri Atre63d98022017-01-24 18:22:09 -08002404 }
2405
2406 hdd_event_eapol_log(skb, QDF_RX);
Jeff Johnson1abc5662019-02-04 14:27:02 -08002407 qdf_dp_trace_log_pkt(adapter->vdev_id, skb, QDF_RX,
Mohit Khanna02281da2017-08-27 09:40:55 -07002408 QDF_TRACE_DEFAULT_PDEV_ID);
Mohit Khannaf8f96822017-05-17 17:11:59 -07002409
Dhanashri Atre63d98022017-01-24 18:22:09 -08002410 DPTRACE(qdf_dp_trace(skb,
2411 QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07002412 QDF_TRACE_DEFAULT_PDEV_ID,
Dhanashri Atre63d98022017-01-24 18:22:09 -08002413 qdf_nbuf_data_addr(skb),
2414 sizeof(qdf_nbuf_data(skb)), QDF_RX));
Mohit Khannaf8f96822017-05-17 17:11:59 -07002415
Mohit Khanna02281da2017-08-27 09:40:55 -07002416 DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
2417 QDF_DP_TRACE_RX_PACKET_RECORD,
2418 0, QDF_RX));
2419
hangtiana7938f82019-01-07 16:35:49 +08002420 dest_mac_addr = (struct qdf_mac_addr *)(skb->data);
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07002421 mac_addr = (struct qdf_mac_addr *)(skb->data+QDF_MAC_ADDR_SIZE);
2422
Bala Venkateshf2867902019-03-08 15:01:23 +05302423 if (!hdd_is_current_high_throughput(hdd_ctx)) {
2424 vdev = hdd_objmgr_get_vdev(adapter);
2425 if (vdev) {
2426 ucfg_tdls_update_rx_pkt_cnt(vdev, mac_addr,
2427 dest_mac_addr);
2428 hdd_objmgr_put_vdev(vdev);
2429 }
2430 }
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07002431
Jeff Johnson80486862017-10-02 13:21:29 -07002432 skb->dev = adapter->dev;
Dhanashri Atre63d98022017-01-24 18:22:09 -08002433 skb->protocol = eth_type_trans(skb, skb->dev);
Jeff Johnson6ced42c2017-10-20 12:48:11 -07002434 ++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
Jeff Johnson80486862017-10-02 13:21:29 -07002435 ++adapter->stats.rx_packets;
2436 adapter->stats.rx_bytes += skb->len;
Dhanashri Atre63d98022017-01-24 18:22:09 -08002437
Alok Kumarb64650c2018-03-23 17:05:11 +05302438 /* Incr GW Rx count for NUD tracking based on GW mac addr */
2439 hdd_nud_incr_gw_rx_pkt_cnt(adapter, mac_addr);
2440
Dhanashri Atre63d98022017-01-24 18:22:09 -08002441 /* Check & drop replayed mcast packets (for IPV6) */
Jeff Johnsoncc011972017-09-03 09:26:36 -07002442 if (hdd_ctx->config->multicast_replay_filter &&
Dhanashri Atre63d98022017-01-24 18:22:09 -08002443 hdd_is_mcast_replay(skb)) {
Manjunathappa Prakashf39d2372019-02-25 18:18:57 -08002444 qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
2445 rx_usolict_arp_n_mcast_drp);
Dhanashri Atre63d98022017-01-24 18:22:09 -08002446 qdf_nbuf_free(skb);
Dhanashri Atrecefa8802017-02-02 16:17:14 -08002447 continue;
Dhanashri Atre63d98022017-01-24 18:22:09 -08002448 }
2449
2450 /* hold configurable wakelock for unicast traffic */
hangtian2b9856f2019-01-25 11:50:39 +08002451 if (!hdd_is_current_high_throughput(hdd_ctx) &&
2452 hdd_ctx->config->rx_wakelock_timeout &&
Jeff Johnson457c2422019-02-27 13:56:04 -08002453 sta_ctx->conn_info.is_authenticated)
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05302454 wake_lock = hdd_is_rx_wake_lock_needed(skb);
2455
2456 if (wake_lock) {
Jeff Johnsoncc011972017-09-03 09:26:36 -07002457 cds_host_diag_log_work(&hdd_ctx->rx_wake_lock,
2458 hdd_ctx->config->rx_wakelock_timeout,
Dhanashri Atre63d98022017-01-24 18:22:09 -08002459 WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
Jeff Johnsoncc011972017-09-03 09:26:36 -07002460 qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock,
2461 hdd_ctx->config->
Dhanashri Atre63d98022017-01-24 18:22:09 -08002462 rx_wakelock_timeout);
2463 }
2464
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002465 /* Remove SKB from internal tracking table before submitting
2466 * it to stack
2467 */
Dhanashri Atre63d98022017-01-24 18:22:09 -08002468 qdf_net_buf_debug_release_skb(skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002469
Yu Wang66a250b2017-07-19 11:46:40 +08002470 hdd_tsf_timestamp_rx(hdd_ctx, skb, ktime_to_us(skb->tstamp));
2471
Mohit Khanna81418772018-10-30 14:14:46 -07002472 qdf_status = hdd_rx_deliver_to_stack(adapter, skb);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07002473
Mohit Khanna81418772018-10-30 14:14:46 -07002474 if (QDF_IS_STATUS_SUCCESS(qdf_status)) {
Jeff Johnson6ced42c2017-10-20 12:48:11 -07002475 ++adapter->hdd_stats.tx_rx_stats.
Sravan Kumar Kairamc1ae71c2017-02-24 12:27:27 +05302476 rx_delivered[cpu_index];
2477 if (track_arp)
2478 ++adapter->hdd_stats.hdd_arp_stats.
Poddar, Siddarth31797fa2018-01-22 17:24:15 +05302479 rx_delivered;
2480 /* track connectivity stats */
2481 if (adapter->pkt_type_bitmap)
2482 hdd_tx_rx_collect_connectivity_stats_info(
2483 skb, adapter,
2484 PKT_TYPE_RX_DELIVERED, &pkt_type);
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07002485 } else {
2486 ++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
2487 if (track_arp)
2488 ++adapter->hdd_stats.hdd_arp_stats.rx_refused;
2489
2490 /* track connectivity stats */
2491 if (adapter->pkt_type_bitmap)
2492 hdd_tx_rx_collect_connectivity_stats_info(
2493 skb, adapter,
2494 PKT_TYPE_RX_REFUSED, &pkt_type);
Yeshwanth Sriram Guntukad0e884a2019-12-23 11:20:35 +05302495 DPTRACE(qdf_dp_log_proto_pkt_info(NULL, NULL, 0, 0,
2496 QDF_RX,
2497 QDF_TRACE_DEFAULT_MSDU_ID,
2498 QDF_TX_RX_STATUS_DROP));
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -07002499
Dhanashri Atre63d98022017-01-24 18:22:09 -08002500 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002501 }
Dhanashri Atrecefa8802017-02-02 16:17:14 -08002502
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302503 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002504}
2505
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002506/**
2507 * hdd_reason_type_to_string() - return string conversion of reason type
2508 * @reason: reason type
2509 *
2510 * This utility function helps log string conversion of reason type.
2511 *
2512 * Return: string conversion of reason type, if match found;
2513 * "Invalid" otherwise.
2514 */
2515const char *hdd_reason_type_to_string(enum netif_reason_type reason)
2516{
2517 switch (reason) {
2518 CASE_RETURN_STRING(WLAN_CONTROL_PATH);
2519 CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL);
2520 CASE_RETURN_STRING(WLAN_FW_PAUSE);
2521 CASE_RETURN_STRING(WLAN_TX_ABORT);
2522 CASE_RETURN_STRING(WLAN_VDEV_STOP);
2523 CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
2524 CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302525 CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL_PRIORITY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002526 default:
Nirav Shah617cff92016-04-25 10:24:24 +05302527 return "Invalid";
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002528 }
2529}
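/*
 * For reference, a minimal sketch of the CASE_RETURN_STRING pattern
 * used by the string converters in this file. The real macro comes
 * from the converged headers; this expansion is an assumption for
 * illustration only:
 *
 *   #define CASE_RETURN_STRING(str) case ((str)): return (# str);
 *
 * so CASE_RETURN_STRING(WLAN_FW_PAUSE) expands to
 * case WLAN_FW_PAUSE: return "WLAN_FW_PAUSE";
 */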
2530
2531/**
2532 * hdd_action_type_to_string() - return string conversion of action type
2533 * @action: action type
2534 *
2535 * This utility function helps log string conversion of action_type.
2536 *
2537 * Return: string conversion of action type, if match found;
2538 * "Invalid" otherwise.
2539 */
2540const char *hdd_action_type_to_string(enum netif_action_type action)
2541{
2542
2543 switch (action) {
2544 CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE);
2545 CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE);
2546 CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
2547 CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
2548 CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302549 CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE);
2550 CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE_N_CARRIER);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002551 CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
2552 CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302553 CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_ON);
2554 CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_OFF);
chenguob795b832018-10-12 15:23:51 +08002555 CASE_RETURN_STRING(WLAN_NETIF_VO_QUEUE_ON);
2556 CASE_RETURN_STRING(WLAN_NETIF_VO_QUEUE_OFF);
2557 CASE_RETURN_STRING(WLAN_NETIF_VI_QUEUE_ON);
2558 CASE_RETURN_STRING(WLAN_NETIF_VI_QUEUE_OFF);
2559 CASE_RETURN_STRING(WLAN_NETIF_BE_BK_QUEUE_OFF);
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302560 CASE_RETURN_STRING(WLAN_WAKE_NON_PRIORITY_QUEUE);
2561 CASE_RETURN_STRING(WLAN_STOP_NON_PRIORITY_QUEUE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002562 default:
Nirav Shah617cff92016-04-25 10:24:24 +05302563 return "Invalid";
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002564 }
2565}
2566
2567/**
2568 * wlan_hdd_update_queue_oper_stats - update queue operation statistics
2569 * @adapter: adapter handle
2570 * @action: action type
2571 * @reason: reason type
2572 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002573static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002574 enum netif_action_type action, enum netif_reason_type reason)
2575{
2576 switch (action) {
2577 case WLAN_STOP_ALL_NETIF_QUEUE:
2578 case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
chenguodc9f0ec2018-09-03 18:53:26 +08002579 case WLAN_NETIF_BE_BK_QUEUE_OFF:
2580 case WLAN_NETIF_VI_QUEUE_OFF:
2581 case WLAN_NETIF_VO_QUEUE_OFF:
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302582 case WLAN_NETIF_PRIORITY_QUEUE_OFF:
2583 case WLAN_STOP_NON_PRIORITY_QUEUE:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002584 adapter->queue_oper_stats[reason].pause_count++;
2585 break;
2586 case WLAN_START_ALL_NETIF_QUEUE:
2587 case WLAN_WAKE_ALL_NETIF_QUEUE:
2588 case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
chenguodc9f0ec2018-09-03 18:53:26 +08002589 case WLAN_NETIF_VI_QUEUE_ON:
2590 case WLAN_NETIF_VO_QUEUE_ON:
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302591 case WLAN_NETIF_PRIORITY_QUEUE_ON:
2592 case WLAN_WAKE_NON_PRIORITY_QUEUE:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002593 adapter->queue_oper_stats[reason].unpause_count++;
2594 break;
2595 default:
2596 break;
2597 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002598}
2599
2600/**
jiad5b986632017-08-04 11:59:20 +08002601 * hdd_netdev_queue_is_locked() - check if a netdev tx queue is locked
2602 * @txq: net device tx queue
2603 *
2604 * On an SMP system, always return false; we can safely rely on
2605 * __netif_tx_trylock(). On a UP system, report whether the xmit lock is held.
2606 *
2607 * Return: true if locked; false otherwise
2608 */
2609#ifdef QCA_CONFIG_SMP
2610static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
2611{
2612 return false;
2613}
2614#else
2615static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
2616{
2617 return txq->xmit_lock_owner != -1;
2618}
2619#endif
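/*
 * Illustrative note (kernel behavior, not defined in this file): the
 * core sets txq->xmit_lock_owner to the owning CPU id while the xmit
 * lock is held and to -1 when it is free, so a value other than -1
 * here means the lock is already taken on this path and acquiring it
 * again would recurse.
 */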
2620
2621/**
Nirav Shah89223f72016-03-01 18:10:38 +05302622 * wlan_hdd_update_txq_timestamp() - update txq timestamp
2623 * @dev: net device
2624 *
2625 * Return: none
2626 */
Jeff Johnson3ae708d2016-10-05 15:45:00 -07002627static void wlan_hdd_update_txq_timestamp(struct net_device *dev)
Nirav Shah89223f72016-03-01 18:10:38 +05302628{
2629 struct netdev_queue *txq;
2630 int i;
Nirav Shah89223f72016-03-01 18:10:38 +05302631
2632 for (i = 0; i < NUM_TX_QUEUES; i++) {
2633 txq = netdev_get_tx_queue(dev, i);
jiad5b986632017-08-04 11:59:20 +08002634
2635 /*
2636 * On a UP system, the kernel triggers a watchdog bite if spinlock
2637 * recursion is detected. Unfortunately recursion is possible
2638 * when this is called in dev_queue_xmit() context, where the
2639 * stack grabs the lock before calling the driver's
2640 * ndo_start_xmit callback.
2641 */
2642 if (!hdd_netdev_queue_is_locked(txq)) {
2643 if (__netif_tx_trylock(txq)) {
2644 txq_trans_update(txq);
2645 __netif_tx_unlock(txq);
2646 }
wadesongba6373e2017-05-15 20:59:05 +08002647 }
Nirav Shah89223f72016-03-01 18:10:38 +05302648 }
2649}
2650
2651/**
Nirav Shah617cff92016-04-25 10:24:24 +05302652 * wlan_hdd_update_unpause_time() - update unpause time
2653 * @adapter: adapter handle
2654 *
2655 * Return: none
2656 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002657static void wlan_hdd_update_unpause_time(struct hdd_adapter *adapter)
Nirav Shah617cff92016-04-25 10:24:24 +05302658{
2659 qdf_time_t curr_time = qdf_system_ticks();
2660
2661 adapter->total_unpause_time += curr_time - adapter->last_time;
2662 adapter->last_time = curr_time;
2663}
2664
2665/**
2666 * wlan_hdd_update_pause_time() - update pause time
2667 * @adapter: adapter handle
 * @temp_map: pause map snapshot taken before the reason bit was cleared
2668 *
2669 * Return: none
2670 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002671static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
Nirav Shahda008342016-05-17 18:50:40 +05302672 uint32_t temp_map)
Nirav Shah617cff92016-04-25 10:24:24 +05302673{
2674 qdf_time_t curr_time = qdf_system_ticks();
Nirav Shahda008342016-05-17 18:50:40 +05302675 uint8_t i;
2676 qdf_time_t pause_time;
Nirav Shah617cff92016-04-25 10:24:24 +05302677
Nirav Shahda008342016-05-17 18:50:40 +05302678 pause_time = curr_time - adapter->last_time;
2679 adapter->total_pause_time += pause_time;
Nirav Shah617cff92016-04-25 10:24:24 +05302680 adapter->last_time = curr_time;
Nirav Shahda008342016-05-17 18:50:40 +05302681
2682 for (i = 0; i < WLAN_REASON_TYPE_MAX; i++) {
2683 if (temp_map & (1 << i)) {
2684 adapter->queue_oper_stats[i].total_pause_time +=
2685 pause_time;
2686 break;
2687 }
2688 }
2689
Nirav Shah617cff92016-04-25 10:24:24 +05302690}
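/*
 * Worked example (illustrative): if the queues were paused at tick T
 * and this runs at tick T + 50 with only the WLAN_FW_PAUSE bit set in
 * temp_map, then total_pause_time grows by 50 ticks,
 * queue_oper_stats[WLAN_FW_PAUSE].total_pause_time grows by 50 ticks,
 * and last_time becomes T + 50. Only the first (lowest-numbered)
 * reason bit found in temp_map is charged for the pause interval.
 */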
2691
Mohit Khannaf7e7b342019-04-08 11:54:21 -07002692uint32_t
2693wlan_hdd_dump_queue_history_state(struct hdd_netif_queue_history *queue_history,
2694 char *buf, uint32_t size)
2695{
2696 unsigned int i;
2697 unsigned int index = 0;
2698
2699 for (i = 0; i < NUM_TX_QUEUES; i++) {
2700 index += qdf_scnprintf(buf + index,
2701 size - index,
2702 "%u:0x%lx ",
2703 i, queue_history->tx_q_state[i]);
2704 }
2705
2706 return index;
2707}
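/*
 * Example output (illustrative): with four TX queues where only
 * queue 2 has a stopped-state bit set, buf would read roughly
 * "0:0x0 1:0x0 2:0x1 3:0x0 ", one "index:state" pair per queue.
 */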
2708
2709/**
2710 * wlan_hdd_update_queue_history_state() - Save a copy of dev TX queues state
2711 * @dev: net device whose TX queue states are sampled
 * @q_hist: queue history entry to fill
2712 *
2713 * Save netdev TX queues state into adapter queue history.
2714 *
2715 * Return: None
2716 */
2717static void
2718wlan_hdd_update_queue_history_state(struct net_device *dev,
2719 struct hdd_netif_queue_history *q_hist)
2720{
2721 unsigned int i = 0;
2722 uint32_t num_tx_queues = 0;
2723 struct netdev_queue *txq = NULL;
2724
2725 num_tx_queues = qdf_min(dev->num_tx_queues, (uint32_t)NUM_TX_QUEUES);
2726
2727 for (i = 0; i < num_tx_queues; i++) {
2728 txq = netdev_get_tx_queue(dev, i);
2729 q_hist->tx_q_state[i] = txq->state;
2730 }
2731}
2732
Nirav Shah617cff92016-04-25 10:24:24 +05302733/**
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302734 * wlan_hdd_stop_non_priority_queue() - stop non-priority queues
2735 * @adapter: adapter handle
2736 *
2737 * Return: None
2738 */
2739static inline void wlan_hdd_stop_non_priority_queue(struct hdd_adapter *adapter)
2740{
2741 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
2742 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
2743 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
2744 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
2745}
2746
2747/**
2748 * wlan_hdd_wake_non_priority_queue() - wake non-priority queues
2749 * @adapter: adapter handle
2750 *
2751 * Return: None
2752 */
2753static inline void wlan_hdd_wake_non_priority_queue(struct hdd_adapter *adapter)
2754{
2755 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
2756 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
2757 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BE);
2758 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BK);
2759}
2760
2761/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002762 * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
2763 * @adapter: adapter handle
2764 * @action: action type
2765 * @reason: reason type
2766 *
2767 * This is the single entry point for netif_queue related actions
2768 * such as starting/stopping the network queues and turning the
2769 * carrier on or off.
2770 *
2771 * Return: None
2772 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07002773void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002774 enum netif_action_type action, enum netif_reason_type reason)
2775{
Nirav Shahda008342016-05-17 18:50:40 +05302776 uint32_t temp_map;
Harprit Chhabada1125e0c2019-01-09 17:12:34 -08002777 uint8_t index;
Mohit Khannaf7e7b342019-04-08 11:54:21 -07002778 struct hdd_netif_queue_history *txq_hist_ptr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002779
2780 if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) ||
2781 (!adapter->dev)) {
2782 hdd_err("adapter is invalid");
2783 return;
2784 }
2785
2786 switch (action) {
2787
2788 case WLAN_NETIF_CARRIER_ON:
2789 netif_carrier_on(adapter->dev);
2790 break;
2791
2792 case WLAN_NETIF_CARRIER_OFF:
2793 netif_carrier_off(adapter->dev);
2794 break;
2795
2796 case WLAN_STOP_ALL_NETIF_QUEUE:
2797 spin_lock_bh(&adapter->pause_map_lock);
Nirav Shah89223f72016-03-01 18:10:38 +05302798 if (!adapter->pause_map) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002799 netif_tx_stop_all_queues(adapter->dev);
Nirav Shah89223f72016-03-01 18:10:38 +05302800 wlan_hdd_update_txq_timestamp(adapter->dev);
Nirav Shah617cff92016-04-25 10:24:24 +05302801 wlan_hdd_update_unpause_time(adapter);
Nirav Shah89223f72016-03-01 18:10:38 +05302802 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002803 adapter->pause_map |= (1 << reason);
2804 spin_unlock_bh(&adapter->pause_map_lock);
2805 break;
2806
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302807 case WLAN_STOP_NON_PRIORITY_QUEUE:
2808 spin_lock_bh(&adapter->pause_map_lock);
2809 if (!adapter->pause_map) {
2810 wlan_hdd_stop_non_priority_queue(adapter);
2811 wlan_hdd_update_txq_timestamp(adapter->dev);
2812 wlan_hdd_update_unpause_time(adapter);
2813 }
2814 adapter->pause_map |= (1 << reason);
2815 spin_unlock_bh(&adapter->pause_map_lock);
2816 break;
2817
2818 case WLAN_NETIF_PRIORITY_QUEUE_ON:
2819 spin_lock_bh(&adapter->pause_map_lock);
2820 temp_map = adapter->pause_map;
2821 adapter->pause_map &= ~(1 << reason);
2822 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
2823 wlan_hdd_update_pause_time(adapter, temp_map);
2824 spin_unlock_bh(&adapter->pause_map_lock);
2825 break;
2826
2827 case WLAN_NETIF_PRIORITY_QUEUE_OFF:
2828 spin_lock_bh(&adapter->pause_map_lock);
2829 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
2830 wlan_hdd_update_txq_timestamp(adapter->dev);
2831 wlan_hdd_update_unpause_time(adapter);
2832 adapter->pause_map |= (1 << reason);
2833 spin_unlock_bh(&adapter->pause_map_lock);
2834 break;
2835
chenguodc9f0ec2018-09-03 18:53:26 +08002836 case WLAN_NETIF_BE_BK_QUEUE_OFF:
2837 spin_lock_bh(&adapter->pause_map_lock);
2838 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
2839 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
2840 wlan_hdd_update_txq_timestamp(adapter->dev);
2841 wlan_hdd_update_unpause_time(adapter);
2842 adapter->pause_map |= (1 << reason);
2843 spin_unlock_bh(&adapter->pause_map_lock);
2844 break;
2845
2846 case WLAN_NETIF_VI_QUEUE_OFF:
2847 spin_lock_bh(&adapter->pause_map_lock);
2848 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
2849 wlan_hdd_update_txq_timestamp(adapter->dev);
2850 wlan_hdd_update_unpause_time(adapter);
2851 adapter->pause_map |= (1 << reason);
2852 spin_unlock_bh(&adapter->pause_map_lock);
2853 break;
2854
2855 case WLAN_NETIF_VI_QUEUE_ON:
2856 spin_lock_bh(&adapter->pause_map_lock);
2857 temp_map = adapter->pause_map;
2858 adapter->pause_map &= ~(1 << reason);
2859 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
2860 wlan_hdd_update_pause_time(adapter, temp_map);
2861 spin_unlock_bh(&adapter->pause_map_lock);
2862 break;
2863
2864 case WLAN_NETIF_VO_QUEUE_OFF:
2865 spin_lock_bh(&adapter->pause_map_lock);
2866 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
2867 wlan_hdd_update_txq_timestamp(adapter->dev);
2868 wlan_hdd_update_unpause_time(adapter);
2869 adapter->pause_map |= (1 << reason);
2870 spin_unlock_bh(&adapter->pause_map_lock);
2871 break;
2872
2873 case WLAN_NETIF_VO_QUEUE_ON:
2874 spin_lock_bh(&adapter->pause_map_lock);
2875 temp_map = adapter->pause_map;
2876 adapter->pause_map &= ~(1 << reason);
2877 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
2878 wlan_hdd_update_pause_time(adapter, temp_map);
2879 spin_unlock_bh(&adapter->pause_map_lock);
2880 break;
2881
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002882 case WLAN_START_ALL_NETIF_QUEUE:
2883 spin_lock_bh(&adapter->pause_map_lock);
Nirav Shahda008342016-05-17 18:50:40 +05302884 temp_map = adapter->pause_map;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002885 adapter->pause_map &= ~(1 << reason);
Nirav Shah617cff92016-04-25 10:24:24 +05302886 if (!adapter->pause_map) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002887 netif_tx_start_all_queues(adapter->dev);
Nirav Shahda008342016-05-17 18:50:40 +05302888 wlan_hdd_update_pause_time(adapter, temp_map);
Nirav Shah617cff92016-04-25 10:24:24 +05302889 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002890 spin_unlock_bh(&adapter->pause_map_lock);
2891 break;
2892
2893 case WLAN_WAKE_ALL_NETIF_QUEUE:
2894 spin_lock_bh(&adapter->pause_map_lock);
Nirav Shahda008342016-05-17 18:50:40 +05302895 temp_map = adapter->pause_map;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002896 adapter->pause_map &= ~(1 << reason);
Nirav Shah617cff92016-04-25 10:24:24 +05302897 if (!adapter->pause_map) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002898 netif_tx_wake_all_queues(adapter->dev);
Nirav Shahda008342016-05-17 18:50:40 +05302899 wlan_hdd_update_pause_time(adapter, temp_map);
Nirav Shah617cff92016-04-25 10:24:24 +05302900 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002901 spin_unlock_bh(&adapter->pause_map_lock);
2902 break;
2903
Rakesh Pillai3e534db2017-09-26 18:59:43 +05302904 case WLAN_WAKE_NON_PRIORITY_QUEUE:
2905 spin_lock_bh(&adapter->pause_map_lock);
2906 temp_map = adapter->pause_map;
2907 adapter->pause_map &= ~(1 << reason);
2908 if (!adapter->pause_map) {
2909 wlan_hdd_wake_non_priority_queue(adapter);
2910 wlan_hdd_update_pause_time(adapter, temp_map);
2911 }
2912 spin_unlock_bh(&adapter->pause_map_lock);
2913 break;
2914
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002915 case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
2916 spin_lock_bh(&adapter->pause_map_lock);
Nirav Shah89223f72016-03-01 18:10:38 +05302917 if (!adapter->pause_map) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002918 netif_tx_stop_all_queues(adapter->dev);
Nirav Shah89223f72016-03-01 18:10:38 +05302919 wlan_hdd_update_txq_timestamp(adapter->dev);
Nirav Shah617cff92016-04-25 10:24:24 +05302920 wlan_hdd_update_unpause_time(adapter);
Nirav Shah89223f72016-03-01 18:10:38 +05302921 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002922 adapter->pause_map |= (1 << reason);
2923 netif_carrier_off(adapter->dev);
2924 spin_unlock_bh(&adapter->pause_map_lock);
2925 break;
2926
2927 case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
2928 spin_lock_bh(&adapter->pause_map_lock);
2929 netif_carrier_on(adapter->dev);
Nirav Shahda008342016-05-17 18:50:40 +05302930 temp_map = adapter->pause_map;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002931 adapter->pause_map &= ~(1 << reason);
Nirav Shah617cff92016-04-25 10:24:24 +05302932 if (!adapter->pause_map) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002933 netif_tx_start_all_queues(adapter->dev);
Nirav Shahda008342016-05-17 18:50:40 +05302934 wlan_hdd_update_pause_time(adapter, temp_map);
Nirav Shah617cff92016-04-25 10:24:24 +05302935 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002936 spin_unlock_bh(&adapter->pause_map_lock);
2937 break;
2938
Mohit Khannaf7e7b342019-04-08 11:54:21 -07002939 case WLAN_NETIF_ACTION_TYPE_NONE:
2940 break;
2941
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002942 default:
2943 hdd_err("unsupported action %d", action);
2944 }
2945
2946 spin_lock_bh(&adapter->pause_map_lock);
2947 if (adapter->pause_map & (1 << WLAN_PEER_UNAUTHORISED))
2948 wlan_hdd_process_peer_unauthorised_pause(adapter);
Harprit Chhabada1125e0c2019-01-09 17:12:34 -08002949
2950 index = adapter->history_index++;
2951 if (adapter->history_index == WLAN_HDD_MAX_HISTORY_ENTRY)
2952 adapter->history_index = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002953 spin_unlock_bh(&adapter->pause_map_lock);
2954
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002955 wlan_hdd_update_queue_oper_stats(adapter, action, reason);
2956
Harprit Chhabada1125e0c2019-01-09 17:12:34 -08002957 adapter->queue_oper_history[index].time = qdf_system_ticks();
2958 adapter->queue_oper_history[index].netif_action = action;
2959 adapter->queue_oper_history[index].netif_reason = reason;
2960 adapter->queue_oper_history[index].pause_map = adapter->pause_map;
Mohit Khannaf7e7b342019-04-08 11:54:21 -07002961
2962 txq_hist_ptr = &adapter->queue_oper_history[index];
2963
2964 wlan_hdd_update_queue_history_state(adapter->dev, txq_hist_ptr);
2965}
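/*
 * Usage sketch (hypothetical caller, not taken from this file):
 * pausing and later resuming all queues around a control-path
 * operation, using the action/reason values defined above:
 *
 *   wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
 *                                WLAN_CONTROL_PATH);
 *   ... control-path work ...
 *   wlan_hdd_netif_queue_control(adapter, WLAN_START_ALL_NETIF_QUEUE,
 *                                WLAN_CONTROL_PATH);
 *
 * The pause_map bookkeeping ensures the queues restart only once
 * every reason bit has been cleared.
 */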
2966
2967void hdd_print_netdev_txq_status(struct net_device *dev)
2968{
2969 unsigned int i;
2970
2971 if (!dev)
2972 return;
2973
2974 for (i = 0; i < dev->num_tx_queues; i++) {
2975 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2976
2977 hdd_debug("netdev tx queue[%u] state:0x%lx",
2978 i, txq->state);
2979 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002980}
2981
Alok Kumara71b36f2019-09-08 20:16:19 +05302982#ifdef WLAN_FEATURE_PKT_CAPTURE
2983/**
2984 * hdd_set_pktcapture_cb() - Set pkt capture mode callback
2985 * @dev: Pointer to net_device structure
2986 * @pdev_id: pdev id
2987 *
2988 * Return: 0 on success; non-zero for failure
2989 */
2990int hdd_set_pktcapture_cb(struct net_device *dev, uint8_t pdev_id)
2991{
2992 struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
2993 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2994
2995 return cdp_register_pktcapture_cb(soc, pdev_id, adapter,
2996 hdd_mon_rx_packet_cbk);
2997}
2998
2999/**
3000 * hdd_reset_pktcapture_cb() - Reset pkt capture mode callback
3001 * @pdev_id: pdev id
3002 *
3003 * Return: None
3004 */
3005void hdd_reset_pktcapture_cb(uint8_t pdev_id)
3006{
3007 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
3008
3009 cdp_deregister_pktcapture_cb(soc, pdev_id);
3010}
3011#endif /* WLAN_FEATURE_PKT_CAPTURE */
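/*
 * Illustrative call flow (hypothetical; the actual call sites live in
 * the packet-capture start/stop plumbing, not in this file):
 *
 *   if (hdd_set_pktcapture_cb(adapter->dev, OL_TXRX_PDEV_ID))
 *           hdd_err("packet capture callback registration failed");
 *   ...
 *   hdd_reset_pktcapture_cb(OL_TXRX_PDEV_ID);
 */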
3012
Nirav Shah73713f72018-05-17 14:50:41 +05303013#ifdef FEATURE_MONITOR_MODE_SUPPORT
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07003014/**
3015 * hdd_set_mon_rx_cb() - Set Monitor mode Rx callback
3016 * @dev: Pointer to net_device structure
3017 *
3018 * Return: 0 for success; non-zero for failure
3019 */
3020int hdd_set_mon_rx_cb(struct net_device *dev)
3021{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07003022 struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
Abhishek Singh9fee5182019-11-01 11:39:38 +05303023 struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07003024 int ret;
3025 QDF_STATUS qdf_status;
3026 struct ol_txrx_desc_type sta_desc = {0};
3027 struct ol_txrx_ops txrx_ops;
Leo Changfdb45c32016-10-28 11:09:23 -07003028 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07003029
Kai Liucdc307f2019-10-15 10:28:19 +08003030 WLAN_ADDR_COPY(sta_desc.peer_addr.bytes, adapter->mac_addr.bytes);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07003031 qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
3032 txrx_ops.rx.rx = hdd_mon_rx_packet_cbk;
Ravi Joshi106ffe02017-01-18 18:09:05 -08003033 hdd_monitor_set_rx_monitor_cb(&txrx_ops, hdd_rx_monitor_callback);
Vevek Venkatesan0ac759f2019-10-03 04:14:29 +05303034 cdp_vdev_register(soc, adapter->vdev_id,
3035 (ol_osif_vdev_handle)adapter,
3036 &txrx_ops);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07003037 /* peer is created wma_vdev_attach->wma_create_peer */
Vevek Venkatesan2d88a6b2019-10-04 19:03:10 +05303038 qdf_status = cdp_peer_register(soc, OL_TXRX_PDEV_ID, &sta_desc);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07003039 if (QDF_STATUS_SUCCESS != qdf_status) {
Leo Changfdb45c32016-10-28 11:09:23 -07003040 hdd_err("cdp_peer_register() failed to register. Status= %d [0x%08X]",
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07003041 qdf_status, qdf_status);
3042 goto exit;
3043 }
3044
Abhishek Singh9fee5182019-11-01 11:39:38 +05303045 qdf_status = sme_create_mon_session(hdd_ctx->mac_handle,
3046 adapter->mac_addr.bytes,
3047 adapter->vdev_id);
3048 if (QDF_STATUS_SUCCESS != qdf_status) {
3049 hdd_err("sme_create_mon_session() failed to register. Status= %d [0x%08X]",
3050 qdf_status, qdf_status);
3051 }
3052
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07003053exit:
3054 ret = qdf_status_to_os_return(qdf_status);
3055 return ret;
3056}
Nirav Shah73713f72018-05-17 14:50:41 +05303057#endif
Nirav Shahbd36b062016-07-18 11:12:59 +05303058
3059/**
3060 * hdd_send_rps_ind() - send rps indication to daemon
3061 * @adapter: adapter context
3062 *
3063 * If the RPS feature is enabled via INI, send an RPS enable
3064 * indication to the daemon; its payload is the interface name used
3065 * to find the correct sysfs node. Send this for every interface.
3066 *
3067 * Return: none
3068 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07003069void hdd_send_rps_ind(struct hdd_adapter *adapter)
Nirav Shahbd36b062016-07-18 11:12:59 +05303070{
3071 int i;
3072 uint8_t cpu_map_list_len = 0;
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07003073 struct hdd_context *hdd_ctxt = NULL;
Nirav Shahbd36b062016-07-18 11:12:59 +05303074 struct wlan_rps_data rps_data;
Yun Parkff6a16a2017-09-26 16:38:18 -07003075 struct cds_config_info *cds_cfg;
3076
3077 cds_cfg = cds_get_ini_config();
Nirav Shahbd36b062016-07-18 11:12:59 +05303078
3079 if (!adapter) {
3080 hdd_err("adapter is NULL");
3081 return;
3082 }
3083
Yun Parkff6a16a2017-09-26 16:38:18 -07003084 if (!cds_cfg) {
3085 hdd_err("cds_cfg is NULL");
3086 return;
3087 }
3088
Nirav Shahbd36b062016-07-18 11:12:59 +05303089 hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
3090 rps_data.num_queues = NUM_TX_QUEUES;
3091
Arun Kumar Khandavalliafcb0552020-01-20 11:46:36 +05303092 hdd_debug("cpu_map_list '%s'", hdd_ctxt->config->cpu_map_list);
Nirav Shahbd36b062016-07-18 11:12:59 +05303093
3094 /* in case no cpu map list is provided, simply return */
3095 if (!strlen(hdd_ctxt->config->cpu_map_list)) {
Arun Kumar Khandavalliafcb0552020-01-20 11:46:36 +05303096 hdd_debug("no cpu map list found");
Nirav Shahbd36b062016-07-18 11:12:59 +05303097 goto err;
3098 }
3099
3100 if (QDF_STATUS_SUCCESS !=
3101 hdd_hex_string_to_u16_array(hdd_ctxt->config->cpu_map_list,
3102 rps_data.cpu_map_list,
3103 &cpu_map_list_len,
3104 WLAN_SVC_IFACE_NUM_QUEUES)) {
3105 hdd_err("invalid cpu map list");
3106 goto err;
3107 }
3108
3109 rps_data.num_queues =
3110 (cpu_map_list_len < rps_data.num_queues) ?
3111 cpu_map_list_len : rps_data.num_queues;
3112
3113 for (i = 0; i < rps_data.num_queues; i++) {
Arun Kumar Khandavalliafcb0552020-01-20 11:46:36 +05303114 hdd_debug("cpu_map_list[%d] = 0x%x",
3115 i, rps_data.cpu_map_list[i]);
Nirav Shahbd36b062016-07-18 11:12:59 +05303116 }
3117
3118 strlcpy(rps_data.ifname, adapter->dev->name,
3119 sizeof(rps_data.ifname));
Kondabattini, Ganesh96ac37b2016-09-02 23:12:15 +05303120 wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
3121 WLAN_SVC_RPS_ENABLE_IND,
Nirav Shahbd36b062016-07-18 11:12:59 +05303122 &rps_data, sizeof(rps_data));
3123
Yun Parkff6a16a2017-09-26 16:38:18 -07003124 cds_cfg->rps_enabled = true;
3125
3126 return;
3127
Nirav Shahbd36b062016-07-18 11:12:59 +05303128err:
Arun Kumar Khandavalliafcb0552020-01-20 11:46:36 +05303129 hdd_debug("Wrong RPS configuration. enabling rx_thread");
Yun Parkff6a16a2017-09-26 16:38:18 -07003130 cds_cfg->rps_enabled = false;
3131}
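/*
 * Worked example (illustrative, assuming a space-separated hex list):
 * a cpu_map_list of "a b" parses into rps_data.cpu_map_list[0] = 0xa
 * and cpu_map_list[1] = 0xb, i.e. RX queue 0 steered to CPUs {1,3}
 * and queue 1 to CPUs {0,1,3}; the daemon writes each mask to the
 * matching sysfs rps_cpus node for this interface.
 */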
3132
3133/**
3134 * hdd_send_rps_disable_ind() - send rps disable indication to daemon
3135 * @adapter: adapter context
3136 *
3137 * Return: none
3138 */
3139void hdd_send_rps_disable_ind(struct hdd_adapter *adapter)
3140{
Yun Parkff6a16a2017-09-26 16:38:18 -07003141 struct hdd_context *hdd_ctxt = NULL;
3142 struct wlan_rps_data rps_data;
3143 struct cds_config_info *cds_cfg;
3144
3145 cds_cfg = cds_get_ini_config();
3146
3147 if (!adapter) {
3148 hdd_err("adapter is NULL");
3149 return;
3150 }
3151
3152 if (!cds_cfg) {
3153 hdd_err("cds_cfg is NULL");
3154 return;
3155 }
3156
3157 hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
3158 rps_data.num_queues = NUM_TX_QUEUES;
3159
3160 hdd_info("Set cpu_map_list 0");
3161
3162 qdf_mem_zero(&rps_data.cpu_map_list, sizeof(rps_data.cpu_map_list));
Yun Parkff6a16a2017-09-26 16:38:18 -07003163
3164 strlcpy(rps_data.ifname, adapter->dev->name, sizeof(rps_data.ifname));
3165 wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
3166 WLAN_SVC_RPS_ENABLE_IND,
3167 &rps_data, sizeof(rps_data));
3168
3169 cds_cfg->rps_enabled = false;
Nirav Shahbd36b062016-07-18 11:12:59 +05303170}
3171
Jeff Johnsonda2afa42018-07-04 10:25:42 -07003172void hdd_tx_queue_cb(hdd_handle_t hdd_handle, uint32_t vdev_id,
Varun Reddy Yeturu076eaa82018-01-16 12:16:14 -08003173 enum netif_action_type action,
3174 enum netif_reason_type reason)
3175{
Jeff Johnsonda2afa42018-07-04 10:25:42 -07003176 struct hdd_context *hdd_ctx = hdd_handle_to_context(hdd_handle);
3177 struct hdd_adapter *adapter;
Varun Reddy Yeturu076eaa82018-01-16 12:16:14 -08003178
3179 /*
3180 * Validating the context is not required here: even if a
3181 * driver unload/SSR is in progress in a different context and
3182 * has been scheduled to run, and the driver then receives a
3183 * firmware event such as a STA kick-out, it is still good to
3184 * disable the Tx queues to stop the influx of traffic.
3185 */
Jeff Johnsonda2afa42018-07-04 10:25:42 -07003186 if (!hdd_ctx) {
Varun Reddy Yeturu076eaa82018-01-16 12:16:14 -08003187 hdd_err("Invalid context passed");
3188 return;
3189 }
3190
3191 adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
Jeff Johnsonda2afa42018-07-04 10:25:42 -07003192 if (!adapter) {
Varun Reddy Yeturu076eaa82018-01-16 12:16:14 -08003193 hdd_err("vdev_id %d does not exist with host", vdev_id);
3194 return;
3195 }
3196 hdd_debug("Tx Queue action %d on vdev %d", action, vdev_id);
3197
3198 wlan_hdd_netif_queue_control(adapter, action, reason);
3199}
3200
Tiger Yu8b119e92019-04-09 13:55:07 +08003201#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
Ravi Joshib89e7f72016-09-07 13:43:15 -07003202/**
3203 * hdd_reset_tcp_delack() - Reset tcp delack value to default
3204 * @hdd_ctx: Handle to hdd context
3205 *
3206 * Function used to reset TCP delack value to its default value
3207 *
3208 * Return: None
3209 */
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07003210void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx)
Ravi Joshib89e7f72016-09-07 13:43:15 -07003211{
Tushnim Bhattacharyyadfbce702018-03-27 12:46:48 -07003212 enum wlan_tp_level next_level = WLAN_SVC_TP_LOW;
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07003213 struct wlan_rx_tp_data rx_tp_data = {0};
Nirav Shahbd36b062016-07-18 11:12:59 +05303214
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07003215 rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07003216 rx_tp_data.level = next_level;
Ravi Joshib89e7f72016-09-07 13:43:15 -07003217 hdd_ctx->rx_high_ind_cnt = 0;
Alok Kumar2fad6442018-11-08 19:19:28 +05303218 wlan_hdd_update_tcp_rx_param(hdd_ctx, &rx_tp_data);
Ravi Joshib89e7f72016-09-07 13:43:15 -07003219}
hangtian2b9856f2019-01-25 11:50:39 +08003220
3221/**
3222 * hdd_is_current_high_throughput() - Check if vote level is high
3223 * @hdd_ctx: Handle to hdd context
3224 *
3225 * Function used to check if vote level is high
3226 *
3227 * Return: True if vote level is high
3228 */
Hangtian Zhu2b2adde2019-09-12 10:47:42 +08003229#ifdef RX_PERFORMANCE
hangtian2b9856f2019-01-25 11:50:39 +08003230bool hdd_is_current_high_throughput(struct hdd_context *hdd_ctx)
3231{
Mohit Khannab9d7e4e2019-08-05 17:43:37 -07003232 if (hdd_ctx->cur_vote_level < PLD_BUS_WIDTH_MEDIUM)
hangtian2b9856f2019-01-25 11:50:39 +08003233 return false;
3234 else
3235 return true;
3236}
Tiger Yu8b119e92019-04-09 13:55:07 +08003237#endif
Hangtian Zhu2b2adde2019-09-12 10:47:42 +08003238#endif
jitiphil869b9f72018-09-25 17:14:01 +05303239
3240#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
3241/**
3242 * hdd_ini_tx_flow_control() - Initialize INIs concerned about tx flow control
3243 * @config: pointer to hdd config
3244 * @psoc: pointer to psoc obj
3245 *
3246 * Return: none
3247 */
3248static void hdd_ini_tx_flow_control(struct hdd_config *config,
3249 struct wlan_objmgr_psoc *psoc)
3250{
3251 config->tx_flow_low_watermark =
3252 cfg_get(psoc, CFG_DP_LL_TX_FLOW_LWM);
3253 config->tx_flow_hi_watermark_offset =
3254 cfg_get(psoc, CFG_DP_LL_TX_FLOW_HWM_OFFSET);
3255 config->tx_flow_max_queue_depth =
3256 cfg_get(psoc, CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH);
3257 config->tx_lbw_flow_low_watermark =
3258 cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_LWM);
3259 config->tx_lbw_flow_hi_watermark_offset =
3260 cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET);
3261 config->tx_lbw_flow_max_queue_depth =
3262 cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH);
3263 config->tx_hbw_flow_low_watermark =
3264 cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_LWM);
3265 config->tx_hbw_flow_hi_watermark_offset =
3266 cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET);
3267 config->tx_hbw_flow_max_queue_depth =
3268 cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH);
3269}
3270#else
3271static void hdd_ini_tx_flow_control(struct hdd_config *config,
3272 struct wlan_objmgr_psoc *psoc)
3273{
3274}
3275#endif
3276
Tiger Yu8b119e92019-04-09 13:55:07 +08003277#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
jitiphil869b9f72018-09-25 17:14:01 +05303278/**
3279 * hdd_ini_bus_bandwidth() - Initialize INIs concerned about bus bandwidth
3280 * @config: pointer to hdd config
3281 * @psoc: pointer to psoc obj
3282 *
3283 * Return: none
3284 */
3285static void hdd_ini_bus_bandwidth(struct hdd_config *config,
3286 struct wlan_objmgr_psoc *psoc)
3287{
Mohit Khanna6dbf9c82019-07-12 17:23:28 -07003288 config->bus_bw_very_high_threshold =
3289 cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_VERY_HIGH_THRESHOLD);
jitiphil869b9f72018-09-25 17:14:01 +05303290 config->bus_bw_high_threshold =
3291 cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD);
3292 config->bus_bw_medium_threshold =
3293 cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_MEDIUM_THRESHOLD);
3294 config->bus_bw_low_threshold =
3295 cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_LOW_THRESHOLD);
3296 config->bus_bw_compute_interval =
3297 cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL);
Jinwei Chen0dc383e2019-08-23 00:43:04 +08003298 config->bus_low_cnt_threshold =
3299 cfg_get(psoc, CFG_DP_BUS_LOW_BW_CNT_THRESHOLD);
Rakesh Pillai73e145b2020-05-26 21:18:36 +05303300 config->enable_latency_crit_clients =
3301 cfg_get(psoc, CFG_DP_BUS_HANDLE_LATENCY_CRITICAL_CLIENTS);
jitiphil869b9f72018-09-25 17:14:01 +05303302}
3303
3304/**
3305 * hdd_ini_tcp_settings() - Initialize INIs concerned about tcp settings
3306 * @config: pointer to hdd config
3307 * @psoc: pointer to psoc obj
3308 *
3309 * Return: none
3310 */
3311static void hdd_ini_tcp_settings(struct hdd_config *config,
3312 struct wlan_objmgr_psoc *psoc)
3313{
3314 config->enable_tcp_limit_output =
3315 cfg_get(psoc, CFG_DP_ENABLE_TCP_LIMIT_OUTPUT);
3316 config->enable_tcp_adv_win_scale =
3317 cfg_get(psoc, CFG_DP_ENABLE_TCP_ADV_WIN_SCALE);
3318 config->enable_tcp_delack =
3319 cfg_get(psoc, CFG_DP_ENABLE_TCP_DELACK);
3320 config->tcp_delack_thres_high =
3321 cfg_get(psoc, CFG_DP_TCP_DELACK_THRESHOLD_HIGH);
3322 config->tcp_delack_thres_low =
3323 cfg_get(psoc, CFG_DP_TCP_DELACK_THRESHOLD_LOW);
3324 config->tcp_delack_timer_count =
3325 cfg_get(psoc, CFG_DP_TCP_DELACK_TIMER_COUNT);
3326 config->tcp_tx_high_tput_thres =
3327 cfg_get(psoc, CFG_DP_TCP_TX_HIGH_TPUT_THRESHOLD);
Alok Kumar2fad6442018-11-08 19:19:28 +05303328 config->enable_tcp_param_update =
3329 cfg_get(psoc, CFG_DP_ENABLE_TCP_PARAM_UPDATE);
jitiphil869b9f72018-09-25 17:14:01 +05303330}
3331#else
3332static void hdd_ini_bus_bandwidth(struct hdd_config *config,
Tiger Yu8b119e92019-04-09 13:55:07 +08003333 struct wlan_objmgr_psoc *psoc)
jitiphil869b9f72018-09-25 17:14:01 +05303334{
3335}
3336
3337static void hdd_ini_tcp_settings(struct hdd_config *config,
3338 struct wlan_objmgr_psoc *psoc)
3339{
3340}
Tiger Yu8b119e92019-04-09 13:55:07 +08003341#endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
jitiphil869b9f72018-09-25 17:14:01 +05303342
3343/**
3344 * hdd_set_rx_mode_value() - set rx_mode values
3345 * @hdd_ctx: hdd context
3346 *
3347 * Return: none
3348 */
3349static void hdd_set_rx_mode_value(struct hdd_context *hdd_ctx)
3350{
3351 uint32_t rx_mode = hdd_ctx->config->rx_mode;
Venkata Sharath Chandra Manchala702be3e2019-03-28 12:24:39 -07003352 enum QDF_GLOBAL_MODE con_mode = 0;
3353
3354 con_mode = hdd_get_conparam();
jitiphil869b9f72018-09-25 17:14:01 +05303355
3356 /* RPS has higher priority than dynamic RPS when both bits are set */
3357 if (rx_mode & CFG_ENABLE_RPS && rx_mode & CFG_ENABLE_DYNAMIC_RPS)
3358 rx_mode &= ~CFG_ENABLE_DYNAMIC_RPS;
3359
3360 if (rx_mode & CFG_ENABLE_RX_THREAD && rx_mode & CFG_ENABLE_RPS) {
3361 hdd_warn("rx_mode wrong configuration. Make it default");
3362 rx_mode = CFG_RX_MODE_DEFAULT;
3363 }
3364
3365 if (rx_mode & CFG_ENABLE_RX_THREAD)
3366 hdd_ctx->enable_rxthread = true;
Venkata Sharath Chandra Manchala702be3e2019-03-28 12:24:39 -07003367 else if (rx_mode & CFG_ENABLE_DP_RX_THREADS) {
3368 if (con_mode == QDF_GLOBAL_MONITOR_MODE)
3369 hdd_ctx->enable_dp_rx_threads = false;
3370 else
3371 hdd_ctx->enable_dp_rx_threads = true;
3372 }
jitiphil869b9f72018-09-25 17:14:01 +05303373
3374 if (rx_mode & CFG_ENABLE_RPS)
3375 hdd_ctx->rps = true;
3376
3377 if (rx_mode & CFG_ENABLE_NAPI)
3378 hdd_ctx->napi_enable = true;
3379
3380 if (rx_mode & CFG_ENABLE_DYNAMIC_RPS)
3381 hdd_ctx->dynamic_rps = true;
3382
3383 hdd_debug("rx_mode:%u dp_rx_threads:%u rx_thread:%u napi:%u rps:%u dynamic rps %u",
3384 rx_mode, hdd_ctx->enable_dp_rx_threads,
3385 hdd_ctx->enable_rxthread, hdd_ctx->napi_enable,
3386 hdd_ctx->rps, hdd_ctx->dynamic_rps);
3387}
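/*
 * Examples (illustrative) of how the rx_mode bits combine: RPS wins
 * over dynamic RPS, so CFG_ENABLE_RPS | CFG_ENABLE_DYNAMIC_RPS leaves
 * only rps = true; CFG_ENABLE_RX_THREAD | CFG_ENABLE_RPS is rejected
 * as a wrong configuration and falls back to CFG_RX_MODE_DEFAULT.
 */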
3388
jitiphilb03ae082018-11-09 17:41:59 +05303389#ifdef CONFIG_DP_TRACE
3390static void
3391hdd_dp_dp_trace_cfg_update(struct hdd_config *config,
3392 struct wlan_objmgr_psoc *psoc)
3393{
3394 qdf_size_t array_out_size;
3395
3396 config->enable_dp_trace = cfg_get(psoc, CFG_DP_ENABLE_DP_TRACE);
3397 qdf_uint8_array_parse(cfg_get(psoc, CFG_DP_DP_TRACE_CONFIG),
3398 config->dp_trace_config,
3399 sizeof(config->dp_trace_config), &array_out_size);
Yeshwanth Sriram Guntukad0e884a2019-12-23 11:20:35 +05303400 config->dp_proto_event_bitmap = cfg_get(psoc,
3401 CFG_DP_PROTO_EVENT_BITMAP);
jitiphilb03ae082018-11-09 17:41:59 +05303402}
3403#else
3404static void
3405hdd_dp_dp_trace_cfg_update(struct hdd_config *config,
3406 struct wlan_objmgr_psoc *psoc)
3407{
3408}
3409#endif
3410
3411#ifdef WLAN_NUD_TRACKING
3412static void
3413hdd_dp_nud_tracking_cfg_update(struct hdd_config *config,
3414 struct wlan_objmgr_psoc *psoc)
3415{
3416 config->enable_nud_tracking = cfg_get(psoc, CFG_DP_ENABLE_NUD_TRACKING);
3417}
3418#else
3419static void
3420hdd_dp_nud_tracking_cfg_update(struct hdd_config *config,
3421 struct wlan_objmgr_psoc *psoc)
3422{
3423}
3424#endif
3425
Tiger Yue40e7832019-04-25 10:46:53 +08003426#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
3427static void hdd_ini_tcp_del_ack_settings(struct hdd_config *config,
3428 struct wlan_objmgr_psoc *psoc)
3429{
3430 config->del_ack_threshold_high =
3431 cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_HIGH_THRESHOLD);
3432 config->del_ack_threshold_low =
3433 cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_LOW_THRESHOLD);
3434 config->del_ack_enable =
3435 cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_ENABLE);
3436 config->del_ack_pkt_count =
3437 cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_PKT_CNT);
3438 config->del_ack_timer_value =
3439 cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_TIMER_VALUE);
3440}
3441#else
3442static void hdd_ini_tcp_del_ack_settings(struct hdd_config *config,
3443 struct wlan_objmgr_psoc *psoc)
3444{
3445}
3446#endif
3447
Nirav Shahfb9b1df2019-11-15 11:40:52 +05303448#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
3449static void hdd_dp_hl_bundle_cfg_update(struct hdd_config *config,
3450 struct wlan_objmgr_psoc *psoc)
3451{
3452 config->pkt_bundle_threshold_high =
3453 cfg_get(psoc, CFG_DP_HL_BUNDLE_HIGH_TH);
3454 config->pkt_bundle_threshold_low =
3455 cfg_get(psoc, CFG_DP_HL_BUNDLE_LOW_TH);
3456 config->pkt_bundle_timer_value =
3457 cfg_get(psoc, CFG_DP_HL_BUNDLE_TIMER_VALUE);
3458 config->pkt_bundle_size =
3459 cfg_get(psoc, CFG_DP_HL_BUNDLE_SIZE);
3460}
3461#else
3462static void hdd_dp_hl_bundle_cfg_update(struct hdd_config *config,
3463 struct wlan_objmgr_psoc *psoc)
3464{
3465}
3466#endif
3467
jitiphil869b9f72018-09-25 17:14:01 +05303468void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
3469 struct hdd_context *hdd_ctx)
3470{
3471 struct hdd_config *config;
jitiphilb03ae082018-11-09 17:41:59 +05303472 qdf_size_t array_out_size;
jitiphil869b9f72018-09-25 17:14:01 +05303473
3474 config = hdd_ctx->config;
3475 hdd_ini_tx_flow_control(config, psoc);
3476 hdd_ini_bus_bandwidth(config, psoc);
3477 hdd_ini_tcp_settings(config, psoc);
Tiger Yue40e7832019-04-25 10:46:53 +08003478
3479 hdd_ini_tcp_del_ack_settings(config, psoc);
3480
Nirav Shahfb9b1df2019-11-15 11:40:52 +05303481 hdd_dp_hl_bundle_cfg_update(config, psoc);
3482
jitiphil869b9f72018-09-25 17:14:01 +05303483 config->napi_cpu_affinity_mask =
3484 cfg_get(psoc, CFG_DP_NAPI_CE_CPU_MASK);
Alok Kumar68127f62019-11-21 16:37:36 +05303485 config->rx_thread_ul_affinity_mask =
3486 cfg_get(psoc, CFG_DP_RX_THREAD_UL_CPU_MASK);
jitiphil869b9f72018-09-25 17:14:01 +05303487 config->rx_thread_affinity_mask =
3488 cfg_get(psoc, CFG_DP_RX_THREAD_CPU_MASK);
Mohit Khanna06cce792020-01-09 05:21:53 -08003489 config->fisa_enable = cfg_get(psoc, CFG_DP_RX_FISA_ENABLE);
jitiphil869b9f72018-09-25 17:14:01 +05303490 qdf_uint8_array_parse(cfg_get(psoc, CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST),
3491 config->cpu_map_list,
jitiphilb03ae082018-11-09 17:41:59 +05303492 sizeof(config->cpu_map_list), &array_out_size);
jitiphil869b9f72018-09-25 17:14:01 +05303493 config->tx_orphan_enable = cfg_get(psoc, CFG_DP_TX_ORPHAN_ENABLE);
3494 config->rx_mode = cfg_get(psoc, CFG_DP_RX_MODE);
3495 hdd_set_rx_mode_value(hdd_ctx);
jitiphil296c23e2018-11-15 16:26:14 +05303496 config->multicast_replay_filter =
3497 cfg_get(psoc, CFG_DP_FILTER_MULTICAST_REPLAY);
3498 config->rx_wakelock_timeout =
3499 cfg_get(psoc, CFG_DP_RX_WAKELOCK_TIMEOUT);
3500 config->num_dp_rx_threads = cfg_get(psoc, CFG_DP_NUM_DP_RX_THREADS);
Manjunathappa Prakashf5b6f5f2019-03-27 15:17:41 -07003501 config->cfg_wmi_credit_cnt = cfg_get(psoc, CFG_DP_HTC_WMI_CREDIT_CNT);
jitiphilb03ae082018-11-09 17:41:59 +05303502 hdd_dp_dp_trace_cfg_update(config, psoc);
3503 hdd_dp_nud_tracking_cfg_update(config, psoc);
jitiphil869b9f72018-09-25 17:14:01 +05303504}
Sravan Kumar Kairam10ed0e82019-08-21 20:52:09 +05303505
3506bool wlan_hdd_rx_rpm_mark_last_busy(struct hdd_context *hdd_ctx,
3507 void *hif_ctx)
3508{
3509 uint64_t duration_us, dp_rx_busy_us, current_us;
3510 uint32_t rpm_delay_ms;
3511
3512 if (!hif_pm_runtime_is_dp_rx_busy(hif_ctx))
3513 return false;
3514
3515 dp_rx_busy_us = hif_pm_runtime_get_dp_rx_busy_mark(hif_ctx);
3516 current_us = qdf_get_log_timestamp_usecs();
3517 duration_us = (unsigned long)((ULONG_MAX - dp_rx_busy_us) +
3518 current_us + 1);
3519 rpm_delay_ms = ucfg_pmo_get_runtime_pm_delay(hdd_ctx->psoc);
3520
Sravan Goud90c34e52019-11-28 14:37:29 +05303521 if (duration_us < (rpm_delay_ms * 1000))
Sravan Kumar Kairam10ed0e82019-08-21 20:52:09 +05303522 return true;
3523 else
3524 return false;
3525}
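/*
 * Note on the arithmetic above: (ULONG_MAX - dp_rx_busy_us) +
 * current_us + 1 is the modular difference current_us - dp_rx_busy_us
 * under 64-bit wraparound. Worked example (illustrative): with
 * dp_rx_busy_us = 1000, current_us = 4000 and a 5 ms runtime PM
 * delay, duration_us = 3000 < 5000, so the DP RX path is reported
 * as still busy (return true).
 */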