/*
 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wlan_hdd_tx_rx.c
 *
 * Linux HDD Tx/RX APIs
 */

/* denote that this file does not allow legacy hddLog */
#define HDD_DISALLOW_LEGACY_HDDLOG 1
#include "osif_sync.h"
#include <wlan_hdd_tx_rx.h>
#include <wlan_hdd_softap_tx_rx.h>
#include <wlan_hdd_napi.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/inetdevice.h>
#include <cds_sched.h>
#include <cds_utils.h>

#include <wlan_hdd_p2p.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include "sap_api.h"
#include "wlan_hdd_wmm.h"
#include "wlan_hdd_tdls.h"
#include "wlan_hdd_ocb.h"
#include "wlan_hdd_lro.h"
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_flow_ctrl_v2.h>
#include "wlan_hdd_nan_datapath.h"
#include "pld_common.h"
#include <cdp_txrx_misc.h>
#include "wlan_hdd_rx_monitor.h"
#include "wlan_hdd_power.h"
#include "wlan_hdd_cfg80211.h"
#include <wlan_hdd_tsf.h>
#include <net/tcp.h>
#include "wma_api.h"

#include "wlan_hdd_nud_tracking.h"
#include "dp_txrx.h"
#include "cfg_ucfg_api.h"
#include "target_type.h"
#include "wlan_hdd_object_manager.h"
#include "nan_public_structs.h"
#include "nan_ucfg_api.h"

#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/*
 * Mapping Linux AC interpretation to SME AC.
 * Host has 5 tx queues: 4 flow-controlled queues for regular traffic and
 * one non-flow-controlled queue for high priority control traffic
 * (EAPOL, DHCP). The fifth queue is mapped to AC_VO to allow for proper
 * prioritization.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
        SME_AC_VO,
        SME_AC_VI,
        SME_AC_BE,
        SME_AC_BK,
        SME_AC_VO,
};

#else
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
        SME_AC_VO,
        SME_AC_VI,
        SME_AC_BE,
        SME_AC_BK,
};

#endif
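
/*
 * Example: the Qdisc queue index selected by the OS (via the select_queue
 * callback) is translated to an SME AC with a direct table lookup, as done
 * later in __hdd_hard_start_xmit():
 *
 *        ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];
 *
 * Queue 0 therefore carries SME_AC_VO, and on flow-control builds the
 * dedicated fifth queue (index 4) maps back to SME_AC_VO as well.
 */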

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
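/**
 * hdd_register_hl_netdev_fc_timer() - Initialize HL netdev flow control timer
 * @adapter: adapter handle
 * @timer_callback: timer callback
 *
 * Return: none
 */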
void hdd_register_hl_netdev_fc_timer(struct hdd_adapter *adapter,
                                     qdf_mc_timer_callback_t timer_callback)
{
        if (!adapter->tx_flow_timer_initialized) {
                qdf_mc_timer_init(&adapter->tx_flow_control_timer,
                                  QDF_TIMER_TYPE_SW, timer_callback, adapter);
                adapter->tx_flow_timer_initialized = true;
        }
}

/**
 * hdd_deregister_hl_netdev_fc_timer() - Deregister HL Flow Control Timer
 * @adapter: adapter handle
 *
 * Return: none
 */
void hdd_deregister_hl_netdev_fc_timer(struct hdd_adapter *adapter)
{
        if (adapter->tx_flow_timer_initialized) {
                qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
                qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
                adapter->tx_flow_timer_initialized = false;
        }
}

/**
 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
 * @adapter_context: pointer to vdev adapter
 *
 * Return: None
 */
void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
        struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;
        void *soc = cds_get_context(QDF_MODULE_ID_SOC);
        struct hdd_context *hdd_ctx;
        u32 p_qpaused;
        u32 np_qpaused;

        if (!adapter) {
                hdd_err("invalid adapter context");
                return;
        }

        /* fetch the HDD context only after the adapter has been validated */
        hdd_ctx = WLAN_HDD_GET_CTX(adapter);

        cdp_display_stats(soc, CDP_DUMP_TX_FLOW_POOL_INFO,
                          QDF_STATS_VERBOSITY_LEVEL_LOW);
        wlan_hdd_display_netif_queue_history(hdd_ctx,
                                             QDF_STATS_VERBOSITY_LEVEL_LOW);
        hdd_debug("Enabling queues");
        spin_lock_bh(&adapter->pause_map_lock);
        p_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL_PRIORITY);
        np_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL);
        spin_unlock_bh(&adapter->pause_map_lock);

        if (p_qpaused) {
                wlan_hdd_netif_queue_control(adapter,
                                             WLAN_NETIF_PRIORITY_QUEUE_ON,
                                             WLAN_DATA_FLOW_CONTROL_PRIORITY);
                cdp_hl_fc_set_os_queue_status(soc,
                                              adapter->vdev_id,
                                              WLAN_NETIF_PRIORITY_QUEUE_ON);
        }
        if (np_qpaused) {
                wlan_hdd_netif_queue_control(adapter,
                                             WLAN_WAKE_NON_PRIORITY_QUEUE,
                                             WLAN_DATA_FLOW_CONTROL);
                cdp_hl_fc_set_os_queue_status(soc,
                                              adapter->vdev_id,
                                              WLAN_WAKE_NON_PRIORITY_QUEUE);
        }
}

#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
 * @adapter_context: pointer to vdev adapter
 *
 * If the blocked OS Q is not resumed within the timeout period, resume the
 * OS Q forcefully to prevent a permanent stall.
 *
 * Return: None
 */
void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
        struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;

        if (!adapter) {
                /* INVALID ARG */
                return;
        }

        hdd_debug("Enabling queues");
        wlan_hdd_netif_queue_control(adapter, WLAN_WAKE_ALL_NETIF_QUEUE,
                                     WLAN_CONTROL_PATH);
}

/**
 * hdd_tx_resume_false() - disable OS TX queues when TX resume is false
 * @adapter: pointer to hdd adapter
 * @tx_resume: TX Q resume trigger
 *
 * Return: None
 */
static void
hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
{
        if (true == tx_resume)
                return;

        /* Pause TX */
        hdd_debug("Disabling queues");
        wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
                                     WLAN_DATA_FLOW_CONTROL);

        if (QDF_TIMER_STATE_STOPPED ==
            qdf_mc_timer_get_current_state(&adapter->
                                           tx_flow_control_timer)) {
                QDF_STATUS status;

                status = qdf_mc_timer_start(&adapter->tx_flow_control_timer,
                                            WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

                if (!QDF_IS_STATUS_SUCCESS(status))
                        hdd_err("Failed to start tx_flow_control_timer");
                else
                        adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
        }

        adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
        adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
}

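/**
 * hdd_skb_orphan() - orphan an skb where needed, else unshare it
 * @adapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Return: pointer to skb structure
 */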
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
                                             struct sk_buff *skb)
{
        struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
        int need_orphan = 0;

        if (adapter->tx_flow_low_watermark > 0) {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
                /*
                 * The TCP TX throttling logic changed slightly after the
                 * 3.19-rc1 kernel: the TCP send limit is smaller, which
                 * throttles the TCP packets handed to the host driver and
                 * makes TCP uplink throughput drop heavily. To fix this,
                 * orphan the socket buffer as soon as possible; this calls
                 * the skb's destructor to notify the TCP stack that the skb
                 * is unowned, and the stack then pumps more packets to the
                 * host driver.
                 *
                 * TX packets might be dropped in the UDP case (e.g. iperf
                 * testing), so orphaning needs to be protected by flow
                 * control.
                 */
                need_orphan = 1;
#else
                if (hdd_ctx->config->tx_orphan_enable)
                        need_orphan = 1;
#endif
        } else if (hdd_ctx->config->tx_orphan_enable) {
                if (qdf_nbuf_is_ipv4_tcp_pkt(skb) ||
                    qdf_nbuf_is_ipv6_tcp_pkt(skb))
                        need_orphan = 1;
        }

        if (need_orphan) {
                skb_orphan(skb);
                ++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
        } else {
                skb = skb_unshare(skb, GFP_ATOMIC);
        }

        return skb;
}

/**
 * hdd_tx_resume_cb() - Resume OS TX Q.
 * @adapter_context: pointer to vdev adapter
 * @tx_resume: TX Q resume trigger
 *
 * Q was stopped due to WLAN TX path low resource condition
 *
 * Return: None
 */
void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
{
        struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;
        struct hdd_station_ctx *hdd_sta_ctx = NULL;

        if (!adapter) {
                /* INVALID ARG */
                return;
        }

        hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);

        /* Resume TX */
        if (true == tx_resume) {
                if (QDF_TIMER_STATE_STOPPED !=
                    qdf_mc_timer_get_current_state(&adapter->
                                                   tx_flow_control_timer)) {
                        qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
                }
                hdd_debug("Enabling queues");
                wlan_hdd_netif_queue_control(adapter,
                                             WLAN_WAKE_ALL_NETIF_QUEUE,
                                             WLAN_DATA_FLOW_CONTROL);
                adapter->hdd_stats.tx_rx_stats.is_txflow_paused = false;
                adapter->hdd_stats.tx_rx_stats.txflow_unpause_cnt++;
        }
        hdd_tx_resume_false(adapter, tx_resume);
}

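/**
 * hdd_tx_flow_control_is_pause() - check if TX Q is paused by flow control
 * @adapter_context: pointer to vdev adapter
 *
 * Return: true if the TX Q is paused by flow control
 */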
bool hdd_tx_flow_control_is_pause(void *adapter_context)
{
        struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;

        if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
                /* INVALID ARG */
                hdd_err("invalid adapter %pK", adapter);
                return false;
        }

        return adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL);
}

void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
                                  qdf_mc_timer_callback_t timer_callback,
                                  ol_txrx_tx_flow_control_fp flow_control_fp,
                                  ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause_fp)
{
        if (adapter->tx_flow_timer_initialized == false) {
                qdf_mc_timer_init(&adapter->tx_flow_control_timer,
                                  QDF_TIMER_TYPE_SW,
                                  timer_callback,
                                  adapter);
                adapter->tx_flow_timer_initialized = true;
        }
        cdp_fc_register(cds_get_context(QDF_MODULE_ID_SOC),
                        adapter->vdev_id, flow_control_fp, adapter,
                        flow_control_is_pause_fp);
}
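
/*
 * Usage sketch (illustrative only; the actual registration happens in the
 * adapter bring-up path outside this file): the pieces above are expected
 * to be wired together roughly as
 *
 *        hdd_register_tx_flow_control(adapter,
 *                                     hdd_tx_resume_timer_expired_handler,
 *                                     hdd_tx_resume_cb,
 *                                     hdd_tx_flow_control_is_pause);
 *
 * so that the DP layer drives hdd_tx_resume_cb() on pool pause/resume while
 * the timer acts as a forced-resume backstop.
 */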

/**
 * hdd_deregister_tx_flow_control() - Deregister TX Flow control
 * @adapter: adapter handle
 *
 * Return: none
 */
void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
{
        cdp_fc_deregister(cds_get_context(QDF_MODULE_ID_SOC),
                          adapter->vdev_id);
        if (adapter->tx_flow_timer_initialized == true) {
                qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
                qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
                adapter->tx_flow_timer_initialized = false;
        }
}

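/**
 * hdd_get_tx_resource() - check TX resources and pause queues when low
 * @adapter: adapter handle
 * @mac_addr: peer MAC address
 * @timer_value: timeout for the forced-resume timer
 *
 * Return: none
 */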
void hdd_get_tx_resource(struct hdd_adapter *adapter,
                         struct qdf_mac_addr *mac_addr, uint16_t timer_value)
{
        if (false ==
            cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC),
                                   cds_get_context(QDF_MODULE_ID_TXRX),
                                   *mac_addr,
                                   adapter->tx_flow_low_watermark,
                                   adapter->tx_flow_hi_watermark_offset)) {
                hdd_debug("Disabling queues lwm %d hwm offset %d",
                          adapter->tx_flow_low_watermark,
                          adapter->tx_flow_hi_watermark_offset);
                wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
                                             WLAN_DATA_FLOW_CONTROL);
                if ((adapter->tx_flow_timer_initialized == true) &&
                    (QDF_TIMER_STATE_STOPPED ==
                     qdf_mc_timer_get_current_state(&adapter->
                                                    tx_flow_control_timer))) {
                        qdf_mc_timer_start(&adapter->tx_flow_control_timer,
                                           timer_value);
                        adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
                        adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
                        adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
                }
        }
}

#else
/**
 * hdd_skb_orphan() - skb_unshare a cloned packet, else skb_orphan
 * @adapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Return: pointer to skb structure
 */
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
                                             struct sk_buff *skb)
{
        struct sk_buff *nskb;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
        struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
#endif

        hdd_skb_fill_gso_size(adapter->dev, skb);

        nskb = skb_unshare(skb, GFP_ATOMIC);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
        if (unlikely(hdd_ctx->config->tx_orphan_enable) && (nskb == skb)) {
                /*
                 * For UDP packets we want to orphan the packet to allow the app
                 * to send more packets. The flow would ultimately be controlled
                 * by the limited number of tx descriptors for the vdev.
                 */
                ++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
                skb_orphan(skb);
        }
#endif
        return nskb;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

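/**
 * hdd_txrx_get_tx_ack_count() - get the TX ack count for an adapter
 * @adapter: pointer to HDD adapter
 *
 * Return: TX ack count
 */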
uint32_t hdd_txrx_get_tx_ack_count(struct hdd_adapter *adapter)
{
        return cdp_get_tx_ack_stats(cds_get_context(QDF_MODULE_ID_SOC),
                                    cds_get_context(QDF_MODULE_ID_TXRX),
                                    adapter->vdev_id);
}

#ifdef FEATURE_WLAN_DIAG_SUPPORT
/**
 * hdd_event_eapol_log() - send event to wlan diag
 * @skb: skb ptr
 * @dir: direction
 *
 * Return: None
 */
void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
{
        int16_t eapol_key_info;

        WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);

        if ((dir == QDF_TX &&
             (QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
              QDF_NBUF_CB_GET_PACKET_TYPE(skb))))
                return;
        else if (!qdf_nbuf_is_ipv4_eapol_pkt(skb))
                return;

        eapol_key_info = (uint16_t)(*(uint16_t *)
                                    (skb->data + EAPOL_KEY_INFO_OFFSET));

        wlan_diag_event.event_sub_type =
                (dir == QDF_TX ?
                 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
                 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
        wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
                        (skb->data + EAPOL_PACKET_TYPE_OFFSET));
        wlan_diag_event.eapol_key_info = eapol_key_info;
        wlan_diag_event.eapol_rate = 0;
        qdf_mem_copy(wlan_diag_event.dest_addr,
                     (skb->data + QDF_NBUF_DEST_MAC_OFFSET),
                     sizeof(wlan_diag_event.dest_addr));
        qdf_mem_copy(wlan_diag_event.src_addr,
                     (skb->data + QDF_NBUF_SRC_MAC_OFFSET),
                     sizeof(wlan_diag_event.src_addr));

        WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
}
#endif /* FEATURE_WLAN_DIAG_SUPPORT */

/**
 * wlan_hdd_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Return: none
 */
void wlan_hdd_classify_pkt(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;

        qdf_mem_zero(skb->cb, sizeof(skb->cb));

        /* check destination mac address is broadcast/multicast */
        if (is_broadcast_ether_addr((uint8_t *)eh))
                QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
        else if (is_multicast_ether_addr((uint8_t *)eh))
                QDF_NBUF_CB_GET_IS_MCAST(skb) = true;

        if (qdf_nbuf_is_ipv4_arp_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_ARP;
        else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_DHCP;
        else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_EAPOL;
        else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_WAPI;
        else if (qdf_nbuf_is_icmp_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_ICMP;
        else if (qdf_nbuf_is_icmpv6_pkt(skb))
                QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
                        QDF_NBUF_CB_PACKET_TYPE_ICMPv6;
}

/**
 * hdd_clear_tx_rx_connectivity_stats() - clear connectivity stats
 * @adapter: pointer to HDD adapter
 *
 * Return: None
 */
static void hdd_clear_tx_rx_connectivity_stats(struct hdd_adapter *adapter)
{
        hdd_info("Clear txrx connectivity stats");
        qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
                     sizeof(adapter->hdd_stats.hdd_arp_stats));
        qdf_mem_zero(&adapter->hdd_stats.hdd_dns_stats,
                     sizeof(adapter->hdd_stats.hdd_dns_stats));
        qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats,
                     sizeof(adapter->hdd_stats.hdd_tcp_stats));
        qdf_mem_zero(&adapter->hdd_stats.hdd_icmpv4_stats,
                     sizeof(adapter->hdd_stats.hdd_icmpv4_stats));
        adapter->pkt_type_bitmap = 0;
        adapter->track_arp_ip = 0;
        qdf_mem_zero(adapter->dns_payload, adapter->track_dns_domain_len);
        adapter->track_dns_domain_len = 0;
        adapter->track_src_port = 0;
        adapter->track_dest_port = 0;
        adapter->track_dest_ipv4 = 0;
}

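/**
 * hdd_reset_all_adapters_connectivity_stats() - reset connectivity stats
 * for all adapters
 * @hdd_ctx: pointer to HDD context
 *
 * Return: None
 */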
void hdd_reset_all_adapters_connectivity_stats(struct hdd_context *hdd_ctx)
{
        struct hdd_adapter *adapter = NULL, *next = NULL;
        QDF_STATUS status;

        hdd_enter();

        status = hdd_get_front_adapter(hdd_ctx, &adapter);

        while (adapter && QDF_STATUS_SUCCESS == status) {
                hdd_clear_tx_rx_connectivity_stats(adapter);
                status = hdd_get_next_adapter(hdd_ctx, adapter, &next);
                adapter = next;
        }

        hdd_exit();
}

/**
 * hdd_is_tx_allowed() - check if Tx is allowed based on current peer state
 * @skb: pointer to OS packet (sk_buff)
 * @peer_mac: peer MAC address
 *
 * This function gets the peer state from DP and checks if it is either
 * OL_TXRX_PEER_STATE_CONN or OL_TXRX_PEER_STATE_AUTH. Only EAPOL packets
 * are allowed when peer_state is OL_TXRX_PEER_STATE_CONN; all packets are
 * allowed when peer_state is OL_TXRX_PEER_STATE_AUTH.
 *
 * Return: true if Tx is allowed and false otherwise.
 */
static inline bool hdd_is_tx_allowed(struct sk_buff *skb, uint8_t *peer_mac)
{
        enum ol_txrx_peer_state peer_state;
        void *soc = cds_get_context(QDF_MODULE_ID_SOC);
        void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        void *peer;
        /* Will be removed in Phase 3 cleanup */
        uint8_t peer_id;

        QDF_BUG(soc);
        QDF_BUG(pdev);

        peer = cdp_peer_find_by_addr(soc, pdev, peer_mac, &peer_id);

        if (!peer) {
                hdd_err_rl("Unable to find peer entry for sta: "
                           QDF_MAC_ADDR_STR,
                           QDF_MAC_ADDR_ARRAY(peer_mac));
                return false;
        }

        peer_state = cdp_peer_state_get(soc, peer);
        if (likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
                return true;
        if (OL_TXRX_PEER_STATE_CONN == peer_state &&
            (ntohs(skb->protocol) == HDD_ETHERTYPE_802_1_X ||
             IS_HDD_ETHERTYPE_WAI(skb)))
                return true;
        QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                  FL("Invalid peer state for Tx: %d"), peer_state);
        return false;
}

/**
 * hdd_tx_rx_is_dns_domain_name_match() - check whether the DNS domain name
 * in the received skb matches the tracked DNS domain name
 * @skb: pointer to skb
 * @adapter: pointer to adapter
 *
 * Return: true if matched, else false
 */
static bool hdd_tx_rx_is_dns_domain_name_match(struct sk_buff *skb,
                                               struct hdd_adapter *adapter)
{
        uint8_t *domain_name;

        if (adapter->track_dns_domain_len == 0)
                return false;

        /* OOB guard: ensure strncmp does not read past skb->len */
        if ((adapter->track_dns_domain_len +
             QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET) > qdf_nbuf_len(skb))
                return false;

        domain_name = qdf_nbuf_get_dns_domain_name(skb,
                                                   adapter->track_dns_domain_len);
        if (strncmp(domain_name, adapter->dns_payload,
                    adapter->track_dns_domain_len) == 0)
                return true;
        else
                return false;
}

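/**
 * hdd_tx_rx_collect_connectivity_stats_info() - collect connectivity stats
 * @skb: pointer to skb data packet
 * @context: adapter context
 * @action: action done on the packet
 * @pkt_type: data packet type
 *
 * Return: None
 */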
void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
                                               void *context,
                                               enum connectivity_stats_pkt_status action,
                                               uint8_t *pkt_type)
{
        uint32_t pkt_type_bitmap;
        struct hdd_adapter *adapter = NULL;

        adapter = (struct hdd_adapter *)context;
        if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
                          "Magic cookie(%x) for adapter sanity verification is invalid",
                          adapter->magic);
                return;
        }

        /* ARP tracking is done already. */
        pkt_type_bitmap = adapter->pkt_type_bitmap;
        pkt_type_bitmap &= ~CONNECTIVITY_CHECK_SET_ARP;

        if (!pkt_type_bitmap)
                return;

        switch (action) {
        case PKT_TYPE_REQ:
        case PKT_TYPE_TX_HOST_FW_SENT:
                if (qdf_nbuf_is_icmp_pkt(skb)) {
                        if (qdf_nbuf_data_is_icmpv4_req(skb) &&
                            (adapter->track_dest_ipv4 ==
                             qdf_nbuf_get_icmpv4_tgt_ip(skb))) {
                                *pkt_type = CONNECTIVITY_CHECK_SET_ICMPV4;
                                if (action == PKT_TYPE_REQ) {
                                        ++adapter->hdd_stats.hdd_icmpv4_stats.
                                                tx_icmpv4_req_count;
                                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                                  "%s : ICMPv4 Req packet",
                                                  __func__);
                                } else
                                        /* host receives tx completion */
                                        ++adapter->hdd_stats.hdd_icmpv4_stats.
                                                tx_host_fw_sent;
                        }
                } else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
                        if (qdf_nbuf_data_is_tcp_syn(skb) &&
                            (adapter->track_dest_port ==
                             qdf_nbuf_data_get_tcp_dst_port(skb))) {
                                *pkt_type = CONNECTIVITY_CHECK_SET_TCP_SYN;
                                if (action == PKT_TYPE_REQ) {
                                        ++adapter->hdd_stats.hdd_tcp_stats.
                                                tx_tcp_syn_count;
                                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                                  "%s : TCP Syn packet",
                                                  __func__);
                                } else
                                        /* host receives tx completion */
                                        ++adapter->hdd_stats.hdd_tcp_stats.
                                                tx_tcp_syn_host_fw_sent;
                        } else if ((adapter->hdd_stats.hdd_tcp_stats.
                                    is_tcp_syn_ack_rcv || adapter->hdd_stats.
                                    hdd_tcp_stats.is_tcp_ack_sent) &&
                                   qdf_nbuf_data_is_tcp_ack(skb) &&
                                   (adapter->track_dest_port ==
                                    qdf_nbuf_data_get_tcp_dst_port(skb))) {
                                *pkt_type = CONNECTIVITY_CHECK_SET_TCP_ACK;
                                if (action == PKT_TYPE_REQ &&
                                    adapter->hdd_stats.hdd_tcp_stats.
                                    is_tcp_syn_ack_rcv) {
                                        ++adapter->hdd_stats.hdd_tcp_stats.
                                                tx_tcp_ack_count;
                                        adapter->hdd_stats.hdd_tcp_stats.
                                                is_tcp_syn_ack_rcv = false;
                                        adapter->hdd_stats.hdd_tcp_stats.
                                                is_tcp_ack_sent = true;
                                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                                  "%s : TCP Ack packet",
                                                  __func__);
                                } else if (action == PKT_TYPE_TX_HOST_FW_SENT &&
                                           adapter->hdd_stats.hdd_tcp_stats.
                                           is_tcp_ack_sent) {
                                        /* host receives tx completion */
                                        ++adapter->hdd_stats.hdd_tcp_stats.
                                                tx_tcp_ack_host_fw_sent;
                                        adapter->hdd_stats.hdd_tcp_stats.
                                                is_tcp_ack_sent = false;
                                }
                        }
                } else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
                        if (qdf_nbuf_data_is_dns_query(skb) &&
                            hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
                                *pkt_type = CONNECTIVITY_CHECK_SET_DNS;
                                if (action == PKT_TYPE_REQ) {
                                        ++adapter->hdd_stats.hdd_dns_stats.
                                                tx_dns_req_count;
                                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                                  "%s : DNS query packet",
                                                  __func__);
                                } else
                                        /* host receives tx completion */
                                        ++adapter->hdd_stats.hdd_dns_stats.
                                                tx_host_fw_sent;
                        }
                }
                break;

        case PKT_TYPE_RSP:
                if (qdf_nbuf_is_icmp_pkt(skb)) {
                        if (qdf_nbuf_data_is_icmpv4_rsp(skb) &&
                            (adapter->track_dest_ipv4 ==
                             qdf_nbuf_get_icmpv4_src_ip(skb))) {
                                ++adapter->hdd_stats.hdd_icmpv4_stats.
                                        rx_icmpv4_rsp_count;
                                *pkt_type =
                                        CONNECTIVITY_CHECK_SET_ICMPV4;
                                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                          QDF_TRACE_LEVEL_INFO_HIGH,
                                          "%s : ICMPv4 Res packet", __func__);
                        }
                } else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
                        if (qdf_nbuf_data_is_tcp_syn_ack(skb) &&
                            (adapter->track_dest_port ==
                             qdf_nbuf_data_get_tcp_src_port(skb))) {
                                ++adapter->hdd_stats.hdd_tcp_stats.
                                        rx_tcp_syn_ack_count;
                                adapter->hdd_stats.hdd_tcp_stats.
                                        is_tcp_syn_ack_rcv = true;
                                *pkt_type =
                                        CONNECTIVITY_CHECK_SET_TCP_SYN_ACK;
                                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                          QDF_TRACE_LEVEL_INFO_HIGH,
                                          "%s : TCP Syn ack packet", __func__);
                        }
                } else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
                        if (qdf_nbuf_data_is_dns_response(skb) &&
                            hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
                                ++adapter->hdd_stats.hdd_dns_stats.
                                        rx_dns_rsp_count;
                                *pkt_type = CONNECTIVITY_CHECK_SET_DNS;
                                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                          QDF_TRACE_LEVEL_INFO_HIGH,
                                          "%s : DNS response packet", __func__);
                        }
                }
                break;

        case PKT_TYPE_TX_DROPPED:
                switch (*pkt_type) {
                case CONNECTIVITY_CHECK_SET_ICMPV4:
                        ++adapter->hdd_stats.hdd_icmpv4_stats.tx_dropped;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : ICMPv4 Req packet dropped", __func__);
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_SYN:
                        ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_dropped;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : TCP syn packet dropped", __func__);
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_ACK:
                        ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_dropped;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : TCP ack packet dropped", __func__);
                        break;
                case CONNECTIVITY_CHECK_SET_DNS:
                        ++adapter->hdd_stats.hdd_dns_stats.tx_dropped;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : DNS query packet dropped", __func__);
                        break;
                default:
                        break;
                }
                break;
        case PKT_TYPE_RX_DELIVERED:
                switch (*pkt_type) {
                case CONNECTIVITY_CHECK_SET_ICMPV4:
                        ++adapter->hdd_stats.hdd_icmpv4_stats.rx_delivered;
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
                        ++adapter->hdd_stats.hdd_tcp_stats.rx_delivered;
                        break;
                case CONNECTIVITY_CHECK_SET_DNS:
                        ++adapter->hdd_stats.hdd_dns_stats.rx_delivered;
                        break;
                default:
                        break;
                }
                break;
        case PKT_TYPE_RX_REFUSED:
                switch (*pkt_type) {
                case CONNECTIVITY_CHECK_SET_ICMPV4:
                        ++adapter->hdd_stats.hdd_icmpv4_stats.rx_refused;
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
                        ++adapter->hdd_stats.hdd_tcp_stats.rx_refused;
                        break;
                case CONNECTIVITY_CHECK_SET_DNS:
                        ++adapter->hdd_stats.hdd_dns_stats.rx_refused;
                        break;
                default:
                        break;
                }
                break;
        case PKT_TYPE_TX_ACK_CNT:
                switch (*pkt_type) {
                case CONNECTIVITY_CHECK_SET_ICMPV4:
                        ++adapter->hdd_stats.hdd_icmpv4_stats.tx_ack_cnt;
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_SYN:
                        ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_ack_cnt;
                        break;
                case CONNECTIVITY_CHECK_SET_TCP_ACK:
                        ++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_ack_cnt;
                        break;
                case CONNECTIVITY_CHECK_SET_DNS:
                        ++adapter->hdd_stats.hdd_dns_stats.tx_ack_cnt;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
}

/**
 * hdd_is_xmit_allowed_on_ndi() - Verify if xmit is allowed on NDI
 * @adapter: The adapter structure
 *
 * Return: True if xmit is allowed on NDI and false otherwise
 */
static bool hdd_is_xmit_allowed_on_ndi(struct hdd_adapter *adapter)
{
        enum nan_datapath_state state;

        state = ucfg_nan_get_ndi_state(adapter->vdev);
        return (state == NAN_DATA_NDI_CREATED_STATE ||
                state == NAN_DATA_CONNECTED_STATE ||
                state == NAN_DATA_CONNECTING_STATE ||
                state == NAN_DATA_PEER_CREATE_STATE);
}

/**
 * hdd_get_transmit_mac_addr() - Get the mac address to validate the xmit
 * @adapter: The adapter structure
 * @skb: The network buffer
 * @mac_addr_tx_allowed: The mac address to be filled
 *
 * Return: None
 */
static
void hdd_get_transmit_mac_addr(struct hdd_adapter *adapter, struct sk_buff *skb,
                               struct qdf_mac_addr *mac_addr_tx_allowed)
{
        struct hdd_station_ctx *sta_ctx = &adapter->session.station;
        bool is_mc_bc_addr = false;

        if (QDF_NBUF_CB_GET_IS_BCAST(skb) || QDF_NBUF_CB_GET_IS_MCAST(skb))
                is_mc_bc_addr = true;

        if (adapter->device_mode == QDF_IBSS_MODE) {
                if (is_mc_bc_addr)
                        qdf_copy_macaddr(mac_addr_tx_allowed,
                                         &adapter->mac_addr);
                else
                        qdf_copy_macaddr(mac_addr_tx_allowed,
                                         (struct qdf_mac_addr *)skb->data);
        } else if (adapter->device_mode == QDF_NDI_MODE &&
                   hdd_is_xmit_allowed_on_ndi(adapter)) {
                if (is_mc_bc_addr)
                        qdf_copy_macaddr(mac_addr_tx_allowed,
                                         &adapter->mac_addr);
                else
                        qdf_copy_macaddr(mac_addr_tx_allowed,
                                         (struct qdf_mac_addr *)skb->data);
        } else {
                if (sta_ctx->conn_info.conn_state ==
                    eConnectionState_Associated)
                        qdf_copy_macaddr(mac_addr_tx_allowed,
                                         &sta_ctx->conn_info.bssid);
        }
}

/**
 * __hdd_hard_start_xmit() - Transmit a frame
 * @skb: pointer to OS packet (sk_buff)
 * @dev: pointer to network device
 *
 * Function registered with the Linux OS for transmitting
 * packets. This version of the function directly passes
 * the packet to the Transport Layer.
 * In case of any packet drop or error, log the error at
 * INFO HIGH/LOW/MEDIUM level to avoid excessive logging in kmsg.
 *
 * Return: None
 */
static void __hdd_hard_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
{
        QDF_STATUS status;
        sme_ac_enum_type ac;
        enum sme_qos_wmmuptype up;
        struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
        bool granted;
        struct hdd_station_ctx *sta_ctx = &adapter->session.station;
        struct qdf_mac_addr *mac_addr;
        struct qdf_mac_addr mac_addr_tx_allowed = QDF_MAC_ADDR_ZERO_INIT;
        uint8_t pkt_type = 0;
        bool is_arp = false;
        struct wlan_objmgr_vdev *vdev;
        struct hdd_context *hdd_ctx;

#ifdef QCA_WIFI_FTM
        if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
                kfree_skb(skb);
                return;
        }
#endif

        ++adapter->hdd_stats.tx_rx_stats.tx_called;
        adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
        mac_addr = (struct qdf_mac_addr *)skb->data;

        if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
            cds_is_load_or_unload_in_progress()) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "Recovery/(Un)load in progress, dropping the packet");
                goto drop_pkt;
        }

        hdd_ctx = adapter->hdd_ctx;
        if (wlan_hdd_validate_context(hdd_ctx)) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "Invalid HDD context");
                goto drop_pkt;
        }

        wlan_hdd_classify_pkt(skb);
        if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) == QDF_NBUF_CB_PACKET_TYPE_ARP) {
                is_arp = true;
                if (qdf_nbuf_data_is_arp_req(skb) &&
                    (adapter->track_arp_ip == qdf_nbuf_get_arp_tgt_ip(skb))) {
                        ++adapter->hdd_stats.hdd_arp_stats.tx_arp_req_count;
                        QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                                  QDF_TRACE_LEVEL_INFO_HIGH,
                                  "%s : ARP packet", __func__);
                }
        }
        /* track connectivity stats */
        if (adapter->pkt_type_bitmap)
                hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
                                                          PKT_TYPE_REQ,
                                                          &pkt_type);

        hdd_get_transmit_mac_addr(adapter, skb, &mac_addr_tx_allowed);
        if (qdf_is_macaddr_zero(&mac_addr_tx_allowed)) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "tx not allowed, transmit operation suspended");
                goto drop_pkt;
        }

        hdd_get_tx_resource(adapter, mac_addr,
                            WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

        /* Get TL AC corresponding to Qdisc queue index/AC. */
        ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];

        if (!qdf_nbuf_ipa_owned_get(skb)) {
                skb = hdd_skb_orphan(adapter, skb);
                if (!skb)
                        goto drop_pkt_accounting;
        }

        /*
         * Add SKB to internal tracking table before further processing
         * in WLAN driver.
         */
        qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__);

        /*
         * User priority from the IP header, which was already extracted
         * and set by the select_queue callback function.
         */
        up = skb->priority;

        ++adapter->hdd_stats.tx_rx_stats.tx_classified_ac[ac];
#ifdef HDD_WMM_DEBUG
        QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
                  "%s: Classified as ac %d up %d", __func__, ac, up);
#endif /* HDD_WMM_DEBUG */

        if (HDD_PSB_CHANGED == adapter->psb_changed) {
                /*
                 * Determine whether acquiring admittance for a WMM AC is
                 * required or not, based on the psb configuration done in
                 * the framework.
                 */
                hdd_wmm_acquire_access_required(adapter, ac);
        }
1031 /*
1032 * Make sure we already have access to this access category
1033 * or it is EAPOL or WAPI frame during initial authentication which
1034 * can have artifically boosted higher qos priority.
1035 */
1036
        if (((adapter->psb_changed & (1 << ac)) &&
             likely(adapter->hdd_wmm_status.ac_status[ac].
                    is_access_allowed)) ||
            ((sta_ctx->conn_info.is_authenticated == false) &&
             (QDF_NBUF_CB_PACKET_TYPE_EAPOL ==
              QDF_NBUF_CB_GET_PACKET_TYPE(skb) ||
              QDF_NBUF_CB_PACKET_TYPE_WAPI ==
              QDF_NBUF_CB_GET_PACKET_TYPE(skb)))) {
                granted = true;
        } else {
                status = hdd_wmm_acquire_access(adapter, ac, &granted);
                adapter->psb_changed |= (1 << ac);
        }

        if (!granted) {
                bool isDefaultAc = false;
                /*
                 * An ADDTS request for this AC has been sent; for now,
                 * send this packet through the next available lower
                 * access category until the ADDTS negotiation completes.
                 */
                while (!likely
                               (adapter->hdd_wmm_status.ac_status[ac].
                                is_access_allowed)) {
                        switch (ac) {
                        case SME_AC_VO:
                                ac = SME_AC_VI;
                                up = SME_QOS_WMM_UP_VI;
                                break;
                        case SME_AC_VI:
                                ac = SME_AC_BE;
                                up = SME_QOS_WMM_UP_BE;
                                break;
                        case SME_AC_BE:
                                ac = SME_AC_BK;
                                up = SME_QOS_WMM_UP_BK;
                                break;
                        default:
                                ac = SME_AC_BK;
                                up = SME_QOS_WMM_UP_BK;
                                isDefaultAc = true;
                                break;
                        }
                        if (isDefaultAc)
                                break;
                }
                skb->priority = up;
                skb->queue_mapping = hdd_linux_up_to_ac_map[up];
        }

        adapter->stats.tx_bytes += skb->len;

        vdev = hdd_objmgr_get_vdev(adapter);
        if (vdev) {
                ucfg_tdls_update_tx_pkt_cnt(vdev, mac_addr);
                hdd_objmgr_put_vdev(vdev);
        }

        if (qdf_nbuf_is_tso(skb)) {
                adapter->stats.tx_packets += qdf_nbuf_get_tso_num_seg(skb);
        } else {
                ++adapter->stats.tx_packets;
                hdd_ctx->no_tx_offload_pkt_cnt++;
        }

        hdd_event_eapol_log(skb, QDF_TX);
        QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
        QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);

        qdf_dp_trace_set_track(skb, QDF_TX);

        DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(skb),
                             sizeof(qdf_nbuf_data(skb)),
                             QDF_TX));

        if (!hdd_is_tx_allowed(skb, mac_addr_tx_allowed.bytes)) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                          QDF_TRACE_LEVEL_INFO_HIGH,
                          FL("Tx not allowed for sta: "
                             QDF_MAC_ADDR_STR),
                          QDF_MAC_ADDR_ARRAY(mac_addr_tx_allowed.bytes));
                ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
                goto drop_pkt_and_release_skb;
        }

        /* check whether the skb needs linearization, e.g. non-linear UDP data */
        if (hdd_skb_nontso_linearize(skb) != QDF_STATUS_SUCCESS) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
                          QDF_TRACE_LEVEL_INFO_HIGH,
                          "%s: skb %pK linearize failed. drop the pkt",
                          __func__, skb);
                ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
                goto drop_pkt_and_release_skb;
        }

        /*
         * If a transmit function is not registered, drop packet
         */
        if (!adapter->tx_fn) {
                QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "%s: TX function not registered by the data path",
                          __func__);
                ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
                goto drop_pkt_and_release_skb;
        }

        if (adapter->tx_fn(adapter->txrx_vdev,
                           (qdf_nbuf_t)skb) != NULL) {
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "%s: Failed to send packet to txrx for sta: "
                          QDF_MAC_ADDR_STR,
                          __func__, QDF_MAC_ADDR_ARRAY(mac_addr->bytes));
                ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
                goto drop_pkt_and_release_skb;
        }

        netif_trans_update(dev);

        return;

drop_pkt_and_release_skb:
        qdf_net_buf_debug_release_skb(skb);
drop_pkt:

        /* track connectivity stats */
        if (adapter->pkt_type_bitmap)
                hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
                                                          PKT_TYPE_TX_DROPPED,
                                                          &pkt_type);
        qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
                              QDF_DP_TRACE_DROP_PACKET_RECORD, 0,
                              QDF_TX);
        kfree_skb(skb);

drop_pkt_accounting:

        ++adapter->stats.tx_dropped;
        ++adapter->hdd_stats.tx_rx_stats.tx_dropped;
        if (is_arp) {
                ++adapter->hdd_stats.hdd_arp_stats.tx_dropped;
                QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
                          "%s : ARP packet dropped", __func__);
        }
}

/**
 * hdd_hard_start_xmit() - Wrapper function to protect
 * __hdd_hard_start_xmit from SSR
 * @skb: pointer to OS packet
 * @net_dev: pointer to net_device structure
 *
 * Function called by the OS when a packet needs to be transmitted.
 *
 * Return: Always returns NETDEV_TX_OK
 */
netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
        struct osif_vdev_sync *vdev_sync;

        if (osif_vdev_sync_op_start(net_dev, &vdev_sync))
                return NETDEV_TX_OK;

        __hdd_hard_start_xmit(skb, net_dev);

        osif_vdev_sync_op_stop(vdev_sync);

        return NETDEV_TX_OK;
}

/**
 * __hdd_tx_timeout() - TX timeout handler
 * @dev: pointer to network device
 *
 * This function is registered as a netdev ndo_tx_timeout method, and
 * is invoked by the kernel if the driver takes too long to transmit a
 * frame.
 *
 * Return: None
 */
static void __hdd_tx_timeout(struct net_device *dev)
{
        struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
        struct hdd_context *hdd_ctx;
        struct netdev_queue *txq;
        void *soc = cds_get_context(QDF_MODULE_ID_SOC);
        u64 diff_jiffies;
        int i = 0;

        hdd_ctx = WLAN_HDD_GET_CTX(adapter);

        if (hdd_ctx->hdd_wlan_suspended) {
                hdd_debug("Device is suspended, ignore WD timeout");
                return;
        }

        TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
        DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_HDD_TX_TIMEOUT,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             NULL, 0, QDF_TX));

        /* Getting here implies we disabled the TX queues for too
         * long. Queues are disabled either because of disassociation
         * or low resource scenarios. In case of disassociation it is
         * ok to ignore this. But if associated, we have to attempt
         * possible recovery here.
         */
1244
1245 for (i = 0; i < NUM_TX_QUEUES; i++) {
1246 txq = netdev_get_tx_queue(dev, i);
Rakesh Pillai70f1f542019-09-10 20:26:54 +05301247 hdd_debug("Queue: %d status: %d txq->trans_start: %lu",
1248 i, netif_tx_queue_stopped(txq), txq->trans_start);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001249 }
1250
Rakesh Pillai70f1f542019-09-10 20:26:54 +05301251 hdd_debug("carrier state: %d", netif_carrier_ok(dev));
Sravan Kumar Kairam887e89e2018-11-01 09:30:38 +05301252
Mohit Khannaca4173b2017-09-12 21:52:19 -07001253 wlan_hdd_display_netif_queue_history(hdd_ctx,
1254 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Leo Changfdb45c32016-10-28 11:09:23 -07001255 cdp_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301256
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001257 ++adapter->hdd_stats.tx_rx_stats.tx_timeout_cnt;
1258 ++adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301259
1260 diff_jiffies = jiffies -
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001261 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301262
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001263 if ((adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt > 1) &&
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301264 (diff_jiffies > (HDD_TX_TIMEOUT * 2))) {
1265 /*
1266 * In case when there is no traffic is running, it may
1267 * possible tx time-out may once happen and later system
1268 * recovered then continuous tx timeout count has to be
1269 * reset as it is gets modified only when traffic is running.
1270 * If over a period of time if this count reaches to threshold
1271 * then host triggers a false subsystem restart. In genuine
1272 * time out case kernel will call the tx time-out back to back
1273 * at interval of HDD_TX_TIMEOUT. Here now check if previous
1274 * TX TIME out has occurred more than twice of HDD_TX_TIMEOUT
1275 * back then host may recovered here from data stall.
1276 */
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001277 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301278 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnson9a27ffa2018-05-06 17:26:57 -07001279 "Reset continuous tx timeout stat");
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301280 }
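
	/*
	 * Worked example (illustrative, assuming HZ = 100 and
	 * HDD_TX_TIMEOUT = 5 * HZ): a genuine stall re-fires this handler
	 * about every 500 jiffies, so two timeouts separated by more than
	 * 1000 jiffies (10 s) cannot belong to one continuous stall and
	 * the counter is reset above.
	 */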

	adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout = jiffies;

	if (adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt >
	    HDD_TX_STALL_THRESHOLD) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Data stall due to continuous TX timeouts");
		adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
		if (cdp_cfg_get(soc, cfg_dp_enable_data_stall))
			cdp_post_data_stall_event(soc,
				      DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
				      DATA_STALL_LOG_HOST_STA_TX_TIMEOUT,
				      0xFF, 0xFF,
				      DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
	}
}

/**
 * hdd_tx_timeout() - Wrapper function to protect __hdd_tx_timeout from SSR
 * @net_dev: pointer to net_device structure
 *
 * Called by the OS if a transmission times out. Since HDD simply enqueues
 * the packet and returns control to the OS right away, in theory this
 * should never be invoked unless the queues have stalled.
 *
 * Return: none
 */
void hdd_tx_timeout(struct net_device *net_dev)
{
	struct osif_vdev_sync *vdev_sync;

	if (osif_vdev_sync_op_start(net_dev, &vdev_sync))
		return;

	__hdd_tx_timeout(net_dev);

	osif_vdev_sync_op_stop(vdev_sync);
}

/**
 * hdd_init_tx_rx() - Initialize Tx/RX module
 * @adapter: pointer to adapter context
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!adapter) {
		hdd_err("adapter is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	return status;
}

/**
 * hdd_deinit_tx_rx() - Deinitialize Tx/RX module
 * @adapter: pointer to adapter context
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter)
{
	QDF_BUG(adapter);
	if (!adapter)
		return QDF_STATUS_E_FAILURE;

	adapter->txrx_vdev = NULL;
	adapter->tx_fn = NULL;

	return QDF_STATUS_SUCCESS;
}

#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * hdd_mon_rx_packet_cbk() - Receive callback registered with OL layer.
 * @context: [in] pointer to qdf context
 * @rxbuf: [in] pointer to rx qdf_nbuf
 *
 * TL will call this to notify the HDD when one or more packets were
 * received for a registered STA.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered, QDF_STATUS_SUCCESS
 * otherwise
 */
static QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
{
	struct hdd_adapter *adapter;
	int rxstat;
	struct sk_buff *skb;
	struct sk_buff *skb_next;
	unsigned int cpu_index;

	/* Sanity check on inputs */
	if ((!context) || (!rxbuf)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)context;
	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "invalid adapter %pK", adapter);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = wlan_hdd_get_cpu();

	/* walk the chain until all are processed */
	skb = (struct sk_buff *)rxbuf;
	while (skb) {
		skb_next = skb->next;
		skb->dev = adapter->dev;

		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
		++adapter->stats.rx_packets;
		adapter->stats.rx_bytes += skb->len;

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(skb);

		/*
		 * If this is not the last packet on the chain, just put the
		 * packet into the backlog queue without scheduling the RX
		 * softirq.
		 */
		if (skb->next) {
			rxstat = netif_rx(skb);
		} else {
			/*
			 * This is the last packet on the chain, so schedule
			 * the RX softirq.
			 */
			rxstat = netif_rx_ni(skb);
		}

		if (NET_RX_SUCCESS == rxstat)
			++adapter->
				hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
		else
			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];

		skb = skb_next;
	}

	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * hdd_is_mcast_replay() - checks if pkt is multicast replay
 * @skb: packet skb
 *
 * Return: true if replayed multicast pkt, false otherwise
 */
static bool hdd_is_mcast_replay(struct sk_buff *skb)
{
	struct ethhdr *eth;

	eth = eth_hdr(skb);
	if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
		if (unlikely(ether_addr_equal(eth->h_source,
					      skb->dev->dev_addr)))
			return true;
	}
	return false;
}

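/*
 * Example (illustrative): a "replayed" multicast packet is one of our own
 * transmitted multicast frames echoed back by the AP. Its source MAC then
 * equals this device's own address, which is exactly what the check above
 * detects.
 */
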
/**
 * hdd_is_arp_local() - check if the ARP request targets a local address
 * @skb: pointer to sk_buff
 *
 * Return: true if local arp or false otherwise.
 */
static bool hdd_is_arp_local(struct sk_buff *skb)
{
	struct arphdr *arp;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	struct in_device *in_dev;
	unsigned char *arp_ptr;
	__be32 tip;

	arp = (struct arphdr *)skb->data;
	if (arp->ar_op == htons(ARPOP_REQUEST)) {
		in_dev = __in_dev_get_rtnl(skb->dev);
		if (in_dev) {
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next) {
				if (!strcmp(skb->dev->name, ifa->ifa_label))
					break;
			}
		}

		if (ifa && ifa->ifa_local) {
			arp_ptr = (unsigned char *)(arp + 1);
			arp_ptr += (skb->dev->addr_len + 4 +
				    skb->dev->addr_len);
			memcpy(&tip, arp_ptr, 4);
			hdd_debug("ARP packet: local IP: %x dest IP: %x",
				  ifa->ifa_local, tip);
			if (ifa->ifa_local == tip)
				return true;
		}
	}

	return false;
}

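/*
 * Layout note: the ARP payload following struct arphdr is sender HW
 * address (addr_len bytes), sender IP (4), target HW address (addr_len),
 * target IP (4). Hence the target IP read above sits at an offset of
 * addr_len + 4 + addr_len bytes past the header.
 */
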
/**
 * hdd_is_rx_wake_lock_needed() - check if wake lock is needed
 * @skb: pointer to sk_buff
 *
 * RX wake lock is needed for:
 * 1) Unicast data packet OR
 * 2) Local ARP data packet
 *
 * Return: true if wake lock is needed or false otherwise.
 */
static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb)
{
	if ((skb->pkt_type != PACKET_BROADCAST &&
	     skb->pkt_type != PACKET_MULTICAST) || hdd_is_arp_local(skb))
		return true;

	return false;
}

#ifdef RECEIVE_OFFLOAD
/**
 * hdd_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO
 * @hdd_ctx: pointer to HDD context
 *
 * Return: None
 */
static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
{
	void *soc;

	soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!(cdp_cfg_get(soc, cfg_dp_lro_enable) ^
	    cdp_cfg_get(soc, cfg_dp_gro_enable))) {
		cdp_cfg_get(soc, cfg_dp_lro_enable) &&
			cdp_cfg_get(soc, cfg_dp_gro_enable) ?
		hdd_err("Can't enable both LRO and GRO, disabling Rx offload") :
		hdd_info("LRO and GRO both are disabled");
		hdd_ctx->ol_enable = 0;
	} else if (cdp_cfg_get(soc, cfg_dp_lro_enable)) {
		hdd_debug("Rx offload LRO is enabled");
		hdd_ctx->ol_enable = CFG_LRO_ENABLED;
	} else {
		hdd_info("Rx offload: GRO is enabled");
		hdd_ctx->ol_enable = CFG_GRO_ENABLED;
	}
}

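/*
 * Decision table for hdd_resolve_rx_ol_mode() (illustrative):
 *   LRO=0 GRO=0 -> offload disabled (both off)
 *   LRO=1 GRO=1 -> offload disabled (conflicting configuration)
 *   LRO=1 GRO=0 -> CFG_LRO_ENABLED
 *   LRO=0 GRO=1 -> CFG_GRO_ENABLED
 * The XOR test above folds the first two rows into a single branch.
 */
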
/**
 * hdd_gro_rx_bh_disable() - GRO RX/flush function.
 * @adapter: pointer to adapter context
 * @napi_to_use: napi to be used to give packets to the stack, gro flush
 * @skb: pointer to sk_buff
 *
 * Function calls napi_gro_receive for the skb. If the skb indicates that a
 * flush needs to be done (set by the lower DP layer), the function also calls
 * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
 * the napi_gro_* calls.
 *
 * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
 * QDF error code.
 */
static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
					struct napi_struct *napi_to_use,
					struct sk_buff *skb)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct hdd_context *hdd_ctx = adapter->hdd_ctx;
	gro_result_t gro_res;

	skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);

	local_bh_disable();
	gro_res = napi_gro_receive(napi_to_use, skb);

	if (hdd_get_current_throughput_level(hdd_ctx) == PLD_BUS_WIDTH_IDLE) {
		if (gro_res != GRO_DROP && gro_res != GRO_NORMAL) {
			adapter->hdd_stats.tx_rx_stats.
					rx_gro_low_tput_flush++;
			napi_gro_flush(napi_to_use, false);
		}
	}
	local_bh_enable();

	if (gro_res == GRO_DROP)
		status = QDF_STATUS_E_GRO_DROP;

	return status;
}

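/*
 * Note (assumption based on kernel GRO conventions): napi_gro_receive()
 * and napi_gro_flush() manipulate per-NAPI GRO state that is normally
 * touched only from softirq context, so bottom halves are disabled around
 * the calls above to avoid racing with the NET_RX softirq on the same CPU.
 */
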
/**
 * hdd_gro_rx_dp_thread() - Handle Rx processing via GRO for DP thread
 * @adapter: pointer to adapter context
 * @skb: pointer to sk_buff
 *
 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
 */
static
QDF_STATUS hdd_gro_rx_dp_thread(struct hdd_adapter *adapter,
				struct sk_buff *skb)
{
	struct napi_struct *napi_to_use = NULL;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (!adapter->hdd_ctx->enable_dp_rx_threads) {
		hdd_dp_err_rl("gro not supported without DP RX thread!");
		return status;
	}

	napi_to_use =
		dp_rx_get_napi_context(cds_get_context(QDF_MODULE_ID_SOC),
				       QDF_NBUF_CB_RX_CTX_ID(skb));

	if (!napi_to_use) {
		hdd_dp_err_rl("no napi to use for GRO!");
		return status;
	}

	status = hdd_gro_rx_bh_disable(adapter, napi_to_use, skb);

	return status;
}

/**
 * hdd_gro_rx_legacy() - Handle Rx processing via GRO for Helium based targets
 * @adapter: pointer to adapter context
 * @skb: pointer to sk_buff
 *
 * Supports GRO for only station mode
 *
 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
 */
static
QDF_STATUS hdd_gro_rx_legacy(struct hdd_adapter *adapter, struct sk_buff *skb)
{
	struct qca_napi_info *qca_napii;
	struct qca_napi_data *napid;
	struct napi_struct *napi_to_use;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct hdd_context *hdd_ctx = adapter->hdd_ctx;

	/* Only enabling it for STA mode like LRO today */
	if (QDF_STA_MODE != adapter->device_mode)
		return QDF_STATUS_E_NOSUPPORT;

	if (qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_low_tput) ||
	    qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_concurrency))
		return QDF_STATUS_E_NOSUPPORT;

	napid = hdd_napi_get_all();
	if (unlikely(!napid))
		goto out;

	qca_napii = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(skb), napid);
	if (unlikely(!qca_napii))
		goto out;

	/*
	 * As we are breaking context in Rxthread mode, there is an rx_thread
	 * NAPI corresponding to each hif_napi.
	 */
	if (adapter->hdd_ctx->enable_rxthread)
		napi_to_use = &qca_napii->rx_thread_napi;
	else
		napi_to_use = &qca_napii->napi;

	status = hdd_gro_rx_bh_disable(adapter, napi_to_use, skb);
out:

	return status;
}

/**
 * hdd_rxthread_napi_gro_flush() - GRO flush callback for NAPI+Rx_Thread Rx mode
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_rxthread_napi_gro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;

	local_bh_disable();
	/*
	 * As we are breaking context in Rxthread mode, there is an rx_thread
	 * NAPI corresponding to each hif_napi.
	 */
	napi_gro_flush(&qca_napii->rx_thread_napi, false);
	local_bh_enable();
}

/**
 * hdd_hif_napi_gro_flush() - GRO flush callback for NAPI Rx mode
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_hif_napi_gro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;

	local_bh_disable();
	napi_gro_flush(&qca_napii->napi, false);
	local_bh_enable();
}

#ifdef FEATURE_LRO
/**
 * hdd_qdf_lro_flush() - LRO flush wrapper
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_qdf_lro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
	qdf_lro_ctx_t qdf_lro_ctx = qca_napii->lro_ctx;

	qdf_lro_flush(qdf_lro_ctx);
}
#else
static void hdd_qdf_lro_flush(void *data)
{
}
#endif

/**
 * hdd_register_rx_ol_cb() - Register LRO/GRO rx processing callbacks
 * @hdd_ctx: pointer to hdd_ctx
 * @lithium_based_target: whether the target is Lithium-arch based or not
 *
 * Return: none
 */
static void hdd_register_rx_ol_cb(struct hdd_context *hdd_ctx,
				  bool lithium_based_target)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!hdd_ctx) {
		hdd_err("HDD context is NULL");
		return;
	}

	hdd_ctx->en_tcp_delack_no_lro = 0;

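	/*
	 * Note: hdd_is_lro_enabled() follows the 0-on-success convention
	 * here, so the negated check below takes the LRO branch when LRO
	 * is the resolved Rx offload mode.
	 */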
	if (!hdd_is_lro_enabled(hdd_ctx)) {
		cdp_register_rx_offld_flush_cb(soc, hdd_qdf_lro_flush);
		hdd_ctx->receive_offload_cb = hdd_lro_rx;
		hdd_debug("LRO is enabled");
	} else if (hdd_ctx->ol_enable == CFG_GRO_ENABLED) {
		if (lithium_based_target) {
			/* no flush registration needed, it happens in DP thread */
			hdd_ctx->receive_offload_cb = hdd_gro_rx_dp_thread;
		} else {
			/* Helium based targets */
			if (hdd_ctx->enable_rxthread)
				cdp_register_rx_offld_flush_cb(soc,
					hdd_rxthread_napi_gro_flush);
			else
				cdp_register_rx_offld_flush_cb(soc,
					hdd_hif_napi_gro_flush);
			hdd_ctx->receive_offload_cb = hdd_gro_rx_legacy;
		}
		hdd_debug("GRO is enabled");
	} else if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
		hdd_ctx->en_tcp_delack_no_lro = 1;
		hdd_debug("TCP Del ACK is enabled");
	}
}

/**
 * hdd_rx_ol_send_config() - Send RX offload configuration to FW
 * @hdd_ctx: pointer to hdd_ctx
 *
 * This function is only used for non-Lithium targets. Lithium-based targets
 * send the LRO configuration to FW during vdev attach, implemented in the
 * common DP layer.
 *
 * Return: 0 on success, non zero on failure
 */
static int hdd_rx_ol_send_config(struct hdd_context *hdd_ctx)
{
	struct cdp_lro_hash_config lro_config = {0};
	/*
	 * This will enable flow steering and Toeplitz hash
	 * So enable it for LRO or GRO processing.
	 */
	if (cfg_get(hdd_ctx->psoc, CFG_DP_GRO) ||
	    cfg_get(hdd_ctx->psoc, CFG_DP_LRO)) {
		lro_config.lro_enable = 1;
		lro_config.tcp_flag = TCPHDR_ACK;
		lro_config.tcp_flag_mask = TCPHDR_FIN | TCPHDR_SYN |
					   TCPHDR_RST | TCPHDR_ACK |
					   TCPHDR_URG | TCPHDR_ECE |
					   TCPHDR_CWR;
	}

	get_random_bytes(lro_config.toeplitz_hash_ipv4,
			 (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
			  LRO_IPV4_SEED_ARR_SZ));

	get_random_bytes(lro_config.toeplitz_hash_ipv6,
			 (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
			  LRO_IPV6_SEED_ARR_SZ));

	if (wma_lro_init(&lro_config))
		return -EAGAIN;
	else
		hdd_dp_info("LRO Config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
			    lro_config.lro_enable, lro_config.tcp_flag,
			    lro_config.tcp_flag_mask);

	return 0;
}

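/*
 * Eligibility sketch (assumption, inferred from the flag/mask pattern):
 * a TCP segment is treated as LRO-eligible when
 *     (tcp_flags & tcp_flag_mask) == tcp_flag
 * so with the values configured above only pure ACK segments, with none of
 * SYN/FIN/RST/URG/ECE/CWR set, are aggregated.
 */
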
int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
{
	int ret = 0;
	bool lithium_based_target = false;

	if (hdd_ctx->target_type == TARGET_TYPE_QCA6290 ||
	    hdd_ctx->target_type == TARGET_TYPE_QCA6390 ||
	    hdd_ctx->target_type == TARGET_TYPE_QCA6490)
		lithium_based_target = true;

	hdd_resolve_rx_ol_mode(hdd_ctx);
	hdd_register_rx_ol_cb(hdd_ctx, lithium_based_target);

	if (!lithium_based_target) {
		ret = hdd_rx_ol_send_config(hdd_ctx);
		if (ret) {
			hdd_ctx->ol_enable = 0;
			hdd_err("Failed to send LRO/GRO configuration! %u", ret);
			return ret;
		}
	}

	return 0;
}

void hdd_disable_rx_ol_in_concurrency(bool disable)
{
	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);

	if (!hdd_ctx) {
		hdd_err("hdd_ctx is NULL");
		return;
	}

	if (disable) {
		if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
			struct wlan_rx_tp_data rx_tp_data;

			hdd_info("Enable TCP delack as LRO disabled in concurrency");
			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
			rx_tp_data.level = GET_CUR_RX_LVL(hdd_ctx);
			wlan_hdd_update_tcp_rx_param(hdd_ctx, &rx_tp_data);
			hdd_ctx->en_tcp_delack_no_lro = 1;
		}
		qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_concurrency, 1);
	} else {
		if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
			hdd_info("Disable TCP delack as LRO is enabled");
			hdd_ctx->en_tcp_delack_no_lro = 0;
			hdd_reset_tcp_delack(hdd_ctx);
		}
		qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_concurrency, 0);
	}
}

void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
{
	if (disable)
		qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_low_tput, 1);
	else
		qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_low_tput, 0);
}

#else /* RECEIVE_OFFLOAD */
int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
{
	hdd_err("Rx_OL, LRO/GRO not supported");
	return -EPERM;
}

void hdd_disable_rx_ol_in_concurrency(bool disable)
{
}

void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
{
}
#endif /* RECEIVE_OFFLOAD */

#ifdef WLAN_FEATURE_TSF_PLUS
static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
					qdf_nbuf_t netbuf,
					uint64_t target_time)
{
	if (!hdd_tsf_is_rx_set(hdd_ctx))
		return;

	hdd_rx_timestamp(netbuf, target_time);
}
#else
static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
					qdf_nbuf_t netbuf,
					uint64_t target_time)
{
}
#endif

QDF_STATUS hdd_rx_thread_gro_flush_ind_cbk(void *adapter, int rx_ctx_id)
{
	struct hdd_adapter *hdd_adapter = adapter;

	if (qdf_unlikely((!hdd_adapter) || (!hdd_adapter->hdd_ctx))) {
		hdd_err("Null params being passed");
		return QDF_STATUS_E_FAILURE;
	}

	if (hdd_is_low_tput_gro_enable(hdd_adapter->hdd_ctx)) {
		hdd_adapter->hdd_stats.tx_rx_stats.rx_gro_flush_skip++;
		return QDF_STATUS_SUCCESS;
	}

	return dp_rx_gro_flush_ind(cds_get_context(QDF_MODULE_ID_SOC),
				   rx_ctx_id);
}

QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter,
					 qdf_nbuf_t nbuf_list)
{
	struct hdd_adapter *hdd_adapter;
	uint8_t vdev_id;
	qdf_nbuf_t head_ptr;

	if (qdf_unlikely(!adapter || !nbuf_list)) {
		hdd_err("Null params being passed");
		return QDF_STATUS_E_FAILURE;
	}

	hdd_adapter = (struct hdd_adapter *)adapter;
	if (hdd_validate_adapter(hdd_adapter))
		return QDF_STATUS_E_FAILURE;

	vdev_id = hdd_adapter->vdev_id;
	head_ptr = nbuf_list;
	while (head_ptr) {
		qdf_nbuf_cb_update_vdev_id(head_ptr, vdev_id);
		head_ptr = qdf_nbuf_next(head_ptr);
	}

	return dp_rx_enqueue_pkt(cds_get_context(QDF_MODULE_ID_SOC), nbuf_list);
}

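/*
 * Note: stamping each nbuf in the chain with the adapter's vdev_id above
 * is what later lets hdd_rx_flush_packet_cbk() discard queued packets for
 * a single vdev via dp_txrx_flush_pkts_by_vdev_id().
 */
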
QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
				   struct sk_buff *skb)
{
	struct hdd_context *hdd_ctx = adapter->hdd_ctx;
	int status = QDF_STATUS_E_FAILURE;
	int netif_status;
	bool skb_receive_offload_ok = false;

	if (QDF_NBUF_CB_RX_TCP_PROTO(skb) &&
	    !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
		skb_receive_offload_ok = true;

	if (skb_receive_offload_ok && hdd_ctx->receive_offload_cb) {
		status = hdd_ctx->receive_offload_cb(adapter, skb);

		if (QDF_IS_STATUS_SUCCESS(status)) {
			adapter->hdd_stats.tx_rx_stats.rx_aggregated++;
			return status;
		}

		if (status == QDF_STATUS_E_GRO_DROP) {
			adapter->hdd_stats.tx_rx_stats.rx_gro_dropped++;
			return status;
		}
	}

	adapter->hdd_stats.tx_rx_stats.rx_non_aggregated++;

	/* Account for GRO/LRO ineligible packets, mostly UDP */
	hdd_ctx->no_rx_offload_pkt_cnt++;

	if (qdf_likely(hdd_ctx->enable_dp_rx_threads ||
		       hdd_ctx->enable_rxthread)) {
		local_bh_disable();
		netif_status = netif_receive_skb(skb);
		local_bh_enable();
	} else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))) {
		/*
		 * Use netif_rx_ni() for frames received before the peer is
		 * registered, to avoid contention with the NAPI softirq.
		 * Refer fix:
		 * qcacld-3.0: Do netif_rx_ni() for frames received before
		 * peer assoc
		 */
		netif_status = netif_rx_ni(skb);
	} else { /* NAPI Context */
		netif_status = netif_receive_skb(skb);
	}

	if (netif_status == NET_RX_SUCCESS)
		status = QDF_STATUS_SUCCESS;

	return status;
}

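/*
 * Delivery path note (assumption about kernel constraints): when invoked
 * from DP/RX thread context, netif_receive_skb() must not race with the
 * NET_RX softirq on the same CPU, which is why the thread paths above wrap
 * it in local_bh_disable()/local_bh_enable().
 */
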
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
static bool hdd_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
{
	return false;
}
#else
static bool hdd_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
{
	return cfg80211_is_gratuitous_arp_unsolicited_na(skb);
}
#endif

QDF_STATUS hdd_rx_flush_packet_cbk(void *adapter_context, uint8_t vdev_id)
{
	struct hdd_adapter *adapter = NULL;
	struct hdd_context *hdd_ctx = NULL;
	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);

	/* Sanity check on inputs */
	if (unlikely(!adapter_context)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)adapter_context;
	if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Magic cookie(%x) for adapter sanity verification is invalid",
			  adapter->magic);
		return QDF_STATUS_E_FAILURE;
	}

	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	if (unlikely(!hdd_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: HDD context is Null", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (hdd_ctx->enable_dp_rx_threads)
		dp_txrx_flush_pkts_by_vdev_id(soc, vdev_id);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
			     qdf_nbuf_t rxBuf)
{
	struct hdd_adapter *adapter = NULL;
	struct hdd_context *hdd_ctx = NULL;
	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
	struct sk_buff *skb = NULL;
	struct sk_buff *next = NULL;
	struct hdd_station_ctx *sta_ctx = NULL;
	unsigned int cpu_index;
	struct qdf_mac_addr *mac_addr, *dest_mac_addr;
	bool wake_lock = false;
	uint8_t pkt_type = 0;
	bool track_arp = false;
	struct wlan_objmgr_vdev *vdev;

	/* Sanity check on inputs */
	if (unlikely((!adapter_context) || (!rxBuf))) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)adapter_context;
	if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Magic cookie(%x) for adapter sanity verification is invalid",
			  adapter->magic);
		return QDF_STATUS_E_FAILURE;
	}

	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	if (unlikely(!hdd_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: HDD context is Null", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = wlan_hdd_get_cpu();

	next = (struct sk_buff *)rxBuf;

	while (next) {
		skb = next;
		next = skb->next;
		skb->next = NULL;

		if (QDF_NBUF_CB_PACKET_TYPE_ARP ==
		    QDF_NBUF_CB_GET_PACKET_TYPE(skb)) {
			if (qdf_nbuf_data_is_arp_rsp(skb) &&
			    (adapter->track_arp_ip ==
			     qdf_nbuf_get_arp_src_ip(skb))) {
				++adapter->hdd_stats.hdd_arp_stats.
						rx_arp_rsp_count;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_DEBUG,
					  "%s: ARP packet received",
					  __func__);
				track_arp = true;
			}
		}
		/* track connectivity stats */
		if (adapter->pkt_type_bitmap)
			hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
						PKT_TYPE_RSP, &pkt_type);

		sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
		if ((sta_ctx->conn_info.proxy_arp_service) &&
		    hdd_is_gratuitous_arp_unsolicited_na(skb)) {
			qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
						rx_usolict_arp_n_mcast_drp);
			/* Drop the skb; freeing it also removes it from
			 * the internal tracking table.
			 */
			qdf_nbuf_free(skb);
			continue;
		}

		hdd_event_eapol_log(skb, QDF_RX);
		qdf_dp_trace_log_pkt(adapter->vdev_id, skb, QDF_RX,
				     QDF_TRACE_DEFAULT_PDEV_ID);

		DPTRACE(qdf_dp_trace(skb,
				     QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(skb),
				     sizeof(qdf_nbuf_data(skb)), QDF_RX));

		DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
					      QDF_DP_TRACE_RX_PACKET_RECORD,
					      0, QDF_RX));

		dest_mac_addr = (struct qdf_mac_addr *)(skb->data);
		mac_addr = (struct qdf_mac_addr *)(skb->data + QDF_MAC_ADDR_SIZE);

		if (!hdd_is_current_high_throughput(hdd_ctx)) {
			vdev = hdd_objmgr_get_vdev(adapter);
			if (vdev) {
				ucfg_tdls_update_rx_pkt_cnt(vdev, mac_addr,
							    dest_mac_addr);
				hdd_objmgr_put_vdev(vdev);
			}
		}

		skb->dev = adapter->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
		++adapter->stats.rx_packets;
		adapter->stats.rx_bytes += skb->len;

		/* Incr GW Rx count for NUD tracking based on GW mac addr */
		hdd_nud_incr_gw_rx_pkt_cnt(adapter, mac_addr);

		/* Check & drop replayed mcast packets (for IPV6) */
		if (hdd_ctx->config->multicast_replay_filter &&
		    hdd_is_mcast_replay(skb)) {
			qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
						rx_usolict_arp_n_mcast_drp);
			qdf_nbuf_free(skb);
			continue;
		}

		/* hold configurable wakelock for unicast traffic */
		if (!hdd_is_current_high_throughput(hdd_ctx) &&
		    hdd_ctx->config->rx_wakelock_timeout &&
		    sta_ctx->conn_info.is_authenticated)
			wake_lock = hdd_is_rx_wake_lock_needed(skb);

		if (wake_lock) {
			cds_host_diag_log_work(&hdd_ctx->rx_wake_lock,
					hdd_ctx->config->rx_wakelock_timeout,
					WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
			qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock,
						      hdd_ctx->config->
							rx_wakelock_timeout);
		}

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(skb);

		hdd_tsf_timestamp_rx(hdd_ctx, skb, ktime_to_us(skb->tstamp));

		qdf_status = hdd_rx_deliver_to_stack(adapter, skb);

		if (QDF_IS_STATUS_SUCCESS(qdf_status)) {
			++adapter->hdd_stats.tx_rx_stats.
						rx_delivered[cpu_index];
			if (track_arp)
				++adapter->hdd_stats.hdd_arp_stats.
							rx_delivered;
			/* track connectivity stats */
			if (adapter->pkt_type_bitmap)
				hdd_tx_rx_collect_connectivity_stats_info(
					skb, adapter,
					PKT_TYPE_RX_DELIVERED, &pkt_type);
		} else {
			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
			if (track_arp)
				++adapter->hdd_stats.hdd_arp_stats.rx_refused;

			/* track connectivity stats */
			if (adapter->pkt_type_bitmap)
				hdd_tx_rx_collect_connectivity_stats_info(
					skb, adapter,
					PKT_TYPE_RX_REFUSED, &pkt_type);
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * hdd_reason_type_to_string() - return string conversion of reason type
 * @reason: reason type
 *
 * This utility function helps log string conversion of reason type.
 *
 * Return: string conversion of reason type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_reason_type_to_string(enum netif_reason_type reason)
{
	switch (reason) {
	CASE_RETURN_STRING(WLAN_CONTROL_PATH);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL);
	CASE_RETURN_STRING(WLAN_FW_PAUSE);
	CASE_RETURN_STRING(WLAN_TX_ABORT);
	CASE_RETURN_STRING(WLAN_VDEV_STOP);
	CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
	CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL_PRIORITY);
	default:
		return "Invalid";
	}
}

/**
 * hdd_action_type_to_string() - return string conversion of action type
 * @action: action type
 *
 * This utility function helps log string conversion of action_type.
 *
 * Return: string conversion of action type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_action_type_to_string(enum netif_action_type action)
{
	switch (action) {
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_VO_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_VO_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_VI_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_VI_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_BE_BK_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_WAKE_NON_PRIORITY_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_NON_PRIORITY_QUEUE);
	default:
		return "Invalid";
	}
}

/**
 * wlan_hdd_update_queue_oper_stats - update queue operation statistics
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 */
static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	switch (action) {
	case WLAN_STOP_ALL_NETIF_QUEUE:
	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
	case WLAN_NETIF_BE_BK_QUEUE_OFF:
	case WLAN_NETIF_VI_QUEUE_OFF:
	case WLAN_NETIF_VO_QUEUE_OFF:
	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
	case WLAN_STOP_NON_PRIORITY_QUEUE:
		adapter->queue_oper_stats[reason].pause_count++;
		break;
	case WLAN_START_ALL_NETIF_QUEUE:
	case WLAN_WAKE_ALL_NETIF_QUEUE:
	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
	case WLAN_NETIF_VI_QUEUE_ON:
	case WLAN_NETIF_VO_QUEUE_ON:
	case WLAN_NETIF_PRIORITY_QUEUE_ON:
	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		adapter->queue_oper_stats[reason].unpause_count++;
		break;
	default:
		break;
	}
}

/**
 * hdd_netdev_queue_is_locked() - check whether a TX queue's xmit lock is held
 * @txq: net device tx queue
 *
 * For SMP system, always return false and we could safely rely on
 * __netif_tx_trylock().
 *
 * Return: true locked; false not locked
 */
#ifdef QCA_CONFIG_SMP
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	return false;
}
#else
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	return txq->xmit_lock_owner != -1;
}
#endif

/**
 * wlan_hdd_update_txq_timestamp() - update txq timestamp
 * @dev: net device
 *
 * Return: none
 */
static void wlan_hdd_update_txq_timestamp(struct net_device *dev)
{
	struct netdev_queue *txq;
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		txq = netdev_get_tx_queue(dev, i);

		/*
		 * On UP system, kernel will trigger watchdog bite if spinlock
		 * recursion is detected. Unfortunately recursion is possible
		 * when it is called in dev_queue_xmit() context, where stack
		 * grabs the lock before calling driver's ndo_start_xmit
		 * callback.
		 */
		if (!hdd_netdev_queue_is_locked(txq)) {
			if (__netif_tx_trylock(txq)) {
				txq_trans_update(txq);
				__netif_tx_unlock(txq);
			}
		}
	}
}

/**
 * wlan_hdd_update_unpause_time() - update unpause time
 * @adapter: adapter handle
 *
 * Return: none
 */
static void wlan_hdd_update_unpause_time(struct hdd_adapter *adapter)
{
	qdf_time_t curr_time = qdf_system_ticks();

	adapter->total_unpause_time += curr_time - adapter->last_time;
	adapter->last_time = curr_time;
}

/**
 * wlan_hdd_update_pause_time() - update pause time
 * @adapter: adapter handle
 * @temp_map: snapshot of the pause map taken before queues were unpaused
 *
 * Return: none
 */
static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
				       uint32_t temp_map)
{
	qdf_time_t curr_time = qdf_system_ticks();
	uint8_t i;
	qdf_time_t pause_time;

	pause_time = curr_time - adapter->last_time;
	adapter->total_pause_time += pause_time;
	adapter->last_time = curr_time;

	for (i = 0; i < WLAN_REASON_TYPE_MAX; i++) {
		if (temp_map & (1 << i)) {
			adapter->queue_oper_stats[i].total_pause_time +=
								pause_time;
			break;
		}
	}
}

uint32_t
wlan_hdd_dump_queue_history_state(struct hdd_netif_queue_history *queue_history,
				  char *buf, uint32_t size)
{
	unsigned int i;
	unsigned int index = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		index += qdf_scnprintf(buf + index,
				       size - index,
				       "%u:0x%lx ",
				       i, queue_history->tx_q_state[i]);
	}

	return index;
}

/**
 * wlan_hdd_update_queue_history_state() - Save a copy of dev TX queues state
 * @dev: net device whose TX queue states are to be captured
 * @q_hist: adapter netif queue history entry to fill in
 *
 * Save netdev TX queues state into adapter queue history.
 *
 * Return: None
 */
static void
wlan_hdd_update_queue_history_state(struct net_device *dev,
				    struct hdd_netif_queue_history *q_hist)
{
	unsigned int i = 0;
	uint32_t num_tx_queues = 0;
	struct netdev_queue *txq = NULL;

	num_tx_queues = qdf_min(dev->num_tx_queues, (uint32_t)NUM_TX_QUEUES);

	for (i = 0; i < num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		q_hist->tx_q_state[i] = txq->state;
	}
}

/**
 * wlan_hdd_stop_non_priority_queue() - stop non priority queues
 * @adapter: adapter handle
 *
 * Return: None
 */
static inline void wlan_hdd_stop_non_priority_queue(struct hdd_adapter *adapter)
{
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
}

/**
 * wlan_hdd_wake_non_priority_queue() - wake non priority queues
 * @adapter: adapter handle
 *
 * Return: None
 */
static inline void wlan_hdd_wake_non_priority_queue(struct hdd_adapter *adapter)
{
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BE);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BK);
}

/**
 * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * This is a single function used for all netif_queue related actions
 * such as start/stop of network queues and the on/off carrier option.
 *
 * Return: None
 */
void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	uint32_t temp_map;
	uint8_t index;
	struct hdd_netif_queue_history *txq_hist_ptr;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) ||
	    (!adapter->dev)) {
		hdd_err("adapter is invalid");
		return;
	}

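	/*
	 * Illustrative note: pause_map keeps one bit per pause reason.
	 * E.g. stopping the queues for WLAN_FW_PAUSE and then for
	 * WLAN_DATA_FLOW_CONTROL sets two bits; a later start for only
	 * one of those reasons clears just its bit, and the queues are
	 * actually restarted only once pause_map drops back to zero.
	 */
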
	switch (action) {

	case WLAN_NETIF_CARRIER_ON:
		netif_carrier_on(adapter->dev);
		break;

	case WLAN_NETIF_CARRIER_OFF:
		netif_carrier_off(adapter->dev);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			wlan_hdd_stop_non_priority_queue(adapter);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_BE_BK_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VI_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VI_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VO_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VO_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_wake_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			wlan_hdd_wake_non_priority_queue(adapter);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
2636 spin_lock_bh(&adapter->pause_map_lock);
Nirav Shah89223f72016-03-01 18:10:38 +05302637 if (!adapter->pause_map) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002638 netif_tx_stop_all_queues(adapter->dev);
Nirav Shah89223f72016-03-01 18:10:38 +05302639 wlan_hdd_update_txq_timestamp(adapter->dev);
Nirav Shah617cff92016-04-25 10:24:24 +05302640 wlan_hdd_update_unpause_time(adapter);
Nirav Shah89223f72016-03-01 18:10:38 +05302641 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002642 adapter->pause_map |= (1 << reason);
2643 netif_carrier_off(adapter->dev);
2644 spin_unlock_bh(&adapter->pause_map_lock);
2645 break;
2646
2647 case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
2648 spin_lock_bh(&adapter->pause_map_lock);
2649 netif_carrier_on(adapter->dev);
Nirav Shahda008342016-05-17 18:50:40 +05302650 temp_map = adapter->pause_map;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002651 adapter->pause_map &= ~(1 << reason);
Nirav Shah617cff92016-04-25 10:24:24 +05302652 if (!adapter->pause_map) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002653 netif_tx_start_all_queues(adapter->dev);
Nirav Shahda008342016-05-17 18:50:40 +05302654 wlan_hdd_update_pause_time(adapter, temp_map);
Nirav Shah617cff92016-04-25 10:24:24 +05302655 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002656 spin_unlock_bh(&adapter->pause_map_lock);
2657 break;
2658
Mohit Khannaf7e7b342019-04-08 11:54:21 -07002659 case WLAN_NETIF_ACTION_TYPE_NONE:
2660 break;
2661
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002662 default:
2663 hdd_err("unsupported action %d", action);
2664 }
2665
2666 spin_lock_bh(&adapter->pause_map_lock);
2667 if (adapter->pause_map & (1 << WLAN_PEER_UNAUTHORISED))
2668 wlan_hdd_process_peer_unauthorised_pause(adapter);
Harprit Chhabada1125e0c2019-01-09 17:12:34 -08002669
2670 index = adapter->history_index++;
2671 if (adapter->history_index == WLAN_HDD_MAX_HISTORY_ENTRY)
2672 adapter->history_index = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002673 spin_unlock_bh(&adapter->pause_map_lock);
2674
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002675 wlan_hdd_update_queue_oper_stats(adapter, action, reason);
2676
Harprit Chhabada1125e0c2019-01-09 17:12:34 -08002677 adapter->queue_oper_history[index].time = qdf_system_ticks();
2678 adapter->queue_oper_history[index].netif_action = action;
2679 adapter->queue_oper_history[index].netif_reason = reason;
2680 adapter->queue_oper_history[index].pause_map = adapter->pause_map;
Mohit Khannaf7e7b342019-04-08 11:54:21 -07002681
2682 txq_hist_ptr = &adapter->queue_oper_history[index];
2683
2684 wlan_hdd_update_queue_history_state(adapter->dev, txq_hist_ptr);
2685}
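
/*
 * Illustrative usage (a sketch, not code from this file): a control-path
 * caller that needs to pause all data queues and later resume them stops
 * and starts with the same reason bit, since each reason owns one bit in
 * pause_map and the queues only run once every reason has been cleared.
 * WLAN_CONTROL_PATH is assumed as the reason here; substitute whichever
 * netif_reason_type value applies.
 *
 *	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
 *				     WLAN_CONTROL_PATH);
 *	... critical section during which no tx should flow ...
 *	wlan_hdd_netif_queue_control(adapter, WLAN_START_ALL_NETIF_QUEUE,
 *				     WLAN_CONTROL_PATH);
 */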

/**
 * hdd_print_netdev_txq_status() - print the state of each netdev Tx queue
 * @dev: pointer to net_device structure
 *
 * Return: none
 */
void hdd_print_netdev_txq_status(struct net_device *dev)
{
	unsigned int i;

	if (!dev)
		return;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		hdd_debug("netdev tx queue[%u] state:0x%lx",
			  i, txq->state);
	}
}

#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * hdd_set_mon_rx_cb() - Set Monitor mode Rx callback
 * @dev: Pointer to net_device structure
 *
 * Return: 0 for success; non-zero for failure
 */
int hdd_set_mon_rx_cb(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	int ret;
	QDF_STATUS qdf_status;
	struct ol_txrx_desc_type sta_desc = {0};
	struct ol_txrx_ops txrx_ops;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	WLAN_ADDR_COPY(sta_desc.peer_addr.bytes, adapter->mac_addr.bytes);
	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
	txrx_ops.rx.rx = hdd_mon_rx_packet_cbk;
	hdd_monitor_set_rx_monitor_cb(&txrx_ops, hdd_rx_monitor_callback);
	cdp_vdev_register(soc,
			  (struct cdp_vdev *)cdp_get_mon_vdev_from_pdev(soc,
			  (struct cdp_pdev *)pdev),
			  adapter, (struct cdp_ctrl_objmgr_vdev *)adapter->vdev,
			  &txrx_ops);
	/* the peer is created in wma_vdev_attach() -> wma_create_peer() */
	qdf_status = cdp_peer_register(soc,
				       (struct cdp_pdev *)pdev, &sta_desc);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		hdd_err("cdp_peer_register() failed. status=%d [0x%08X]",
			qdf_status, qdf_status);
		goto exit;
	}

exit:
	ret = qdf_status_to_os_return(qdf_status);
	return ret;
}
#endif

/**
 * hdd_send_rps_ind() - send RPS indication to daemon
 * @adapter: adapter context
 *
 * If the RPS feature is enabled via INI, send an RPS enable indication to
 * the daemon. The indication carries the interface name so that the daemon
 * can find the correct sysfs node; it should be sent for every available
 * interface.
 *
 * Return: none
 */
void hdd_send_rps_ind(struct hdd_adapter *adapter)
{
	int i;
	uint8_t cpu_map_list_len = 0;
	struct hdd_context *hdd_ctxt = NULL;
	struct wlan_rps_data rps_data;
	struct cds_config_info *cds_cfg;

	cds_cfg = cds_get_ini_config();

	if (!adapter) {
		hdd_err("adapter is NULL");
		return;
	}

	if (!cds_cfg) {
		hdd_err("cds_cfg is NULL");
		return;
	}

	hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
	rps_data.num_queues = NUM_TX_QUEUES;

	hdd_info("cpu_map_list '%s'", hdd_ctxt->config->cpu_map_list);

	/* in case no cpu map list is provided, simply return */
	if (!strlen(hdd_ctxt->config->cpu_map_list)) {
		hdd_err("no cpu map list found");
		goto err;
	}

	if (QDF_STATUS_SUCCESS !=
	    hdd_hex_string_to_u16_array(hdd_ctxt->config->cpu_map_list,
					rps_data.cpu_map_list,
					&cpu_map_list_len,
					WLAN_SVC_IFACE_NUM_QUEUES)) {
		hdd_err("invalid cpu map list");
		goto err;
	}

	rps_data.num_queues =
		(cpu_map_list_len < rps_data.num_queues) ?
		cpu_map_list_len : rps_data.num_queues;

	for (i = 0; i < rps_data.num_queues; i++) {
		hdd_info("cpu_map_list[%d] = 0x%x",
			 i, rps_data.cpu_map_list[i]);
	}

	strlcpy(rps_data.ifname, adapter->dev->name,
		sizeof(rps_data.ifname));
	wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
				    WLAN_SVC_RPS_ENABLE_IND,
				    &rps_data, sizeof(rps_data));

	cds_cfg->rps_enabled = true;

	return;

err:
	hdd_err("wrong RPS configuration, enabling rx_thread");
	cds_cfg->rps_enabled = false;
}
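
/*
 * Illustrative sketch of the expected INI format (an assumption inferred
 * from the parsing above, not normative; the INI key name is hypothetical):
 * the cpu map list is a space-separated list of hex CPU masks, one entry
 * per RX queue. For example,
 *
 *	rpsRxQueueCpuMapList=a a a a
 *
 * would ask the daemon to steer all four queues to CPUs 1 and 3
 * (0xa == 0b1010). hdd_hex_string_to_u16_array() converts the string into
 * the u16 masks carried in rps_data.cpu_map_list.
 */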

/**
 * hdd_send_rps_disable_ind() - send RPS disable indication to daemon
 * @adapter: adapter context
 *
 * Return: none
 */
void hdd_send_rps_disable_ind(struct hdd_adapter *adapter)
{
	struct hdd_context *hdd_ctxt = NULL;
	struct wlan_rps_data rps_data;
	struct cds_config_info *cds_cfg;

	cds_cfg = cds_get_ini_config();

	if (!adapter) {
		hdd_err("adapter is NULL");
		return;
	}

	if (!cds_cfg) {
		hdd_err("cds_cfg is NULL");
		return;
	}

	hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
	rps_data.num_queues = NUM_TX_QUEUES;

	hdd_info("Set cpu_map_list 0");

	/* a zeroed cpu map sent in the enable message tells the daemon to
	 * disable RPS on this interface
	 */
	qdf_mem_zero(&rps_data.cpu_map_list, sizeof(rps_data.cpu_map_list));

	strlcpy(rps_data.ifname, adapter->dev->name, sizeof(rps_data.ifname));
	wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
				    WLAN_SVC_RPS_ENABLE_IND,
				    &rps_data, sizeof(rps_data));

	cds_cfg->rps_enabled = false;
}

/**
 * hdd_tx_queue_cb() - apply a Tx queue action for a given vdev
 * @hdd_handle: HDD handle
 * @vdev_id: vdev id for which the queue action is requested
 * @action: action type
 * @reason: reason type
 *
 * Return: none
 */
void hdd_tx_queue_cb(hdd_handle_t hdd_handle, uint32_t vdev_id,
		     enum netif_action_type action,
		     enum netif_reason_type reason)
{
	struct hdd_context *hdd_ctx = hdd_handle_to_context(hdd_handle);
	struct hdd_adapter *adapter;

	/*
	 * Full validation of the context is not required here: even if a
	 * driver unload/SSR has been scheduled in another context, a
	 * firmware event such as an STA kick-out should still be able to
	 * disable the Tx queues and stop the influx of traffic, so a
	 * NULL check is sufficient.
	 */
	if (!hdd_ctx) {
		hdd_err("Invalid context passed");
		return;
	}

	adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
	if (!adapter) {
		hdd_err("vdev_id %d does not exist with host", vdev_id);
		return;
	}
	hdd_debug("Tx Queue action %d on vdev %d", action, vdev_id);

	wlan_hdd_netif_queue_control(adapter, action, reason);
}
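
/*
 * Illustrative sketch (assumed registration point, not code from this
 * file): this callback is meant to be handed to SME so that firmware
 * initiated pause/unpause events can reach the netif queues, along the
 * lines of:
 *
 *	sme_register_tx_queue_cb(hdd_ctx->mac_handle, hdd_tx_queue_cb);
 *
 * The exact registration API is an assumption here; whichever hook is
 * used, it must pass the HDD handle back as hdd_handle.
 */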

#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
/**
 * hdd_reset_tcp_delack() - Reset tcp delack value to default
 * @hdd_ctx: Handle to hdd context
 *
 * Function used to reset the TCP delayed ACK setting to its default value
 *
 * Return: None
 */
void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx)
{
	enum wlan_tp_level next_level = WLAN_SVC_TP_LOW;
	struct wlan_rx_tp_data rx_tp_data = {0};

	rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;
	rx_tp_data.level = next_level;
	hdd_ctx->rx_high_ind_cnt = 0;
	wlan_hdd_update_tcp_rx_param(hdd_ctx, &rx_tp_data);
}

/**
 * hdd_is_current_high_throughput() - Check if the bus bandwidth vote is high
 * @hdd_ctx: Handle to hdd context
 *
 * Return: true if the current vote level is PLD_BUS_WIDTH_MEDIUM or above
 */
#ifdef RX_PERFORMANCE
bool hdd_is_current_high_throughput(struct hdd_context *hdd_ctx)
{
	return hdd_ctx->cur_vote_level >= PLD_BUS_WIDTH_MEDIUM;
}
#endif /* RX_PERFORMANCE */
#endif /* WLAN_FEATURE_DP_BUS_BANDWIDTH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * hdd_ini_tx_flow_control() - Initialize INIs concerned with tx flow control
 * @config: pointer to hdd config
 * @psoc: pointer to psoc obj
 *
 * Return: none
 */
static void hdd_ini_tx_flow_control(struct hdd_config *config,
				    struct wlan_objmgr_psoc *psoc)
{
	config->tx_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_LWM);
	config->tx_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_HWM_OFFSET);
	config->tx_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH);
	config->tx_lbw_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_LWM);
	config->tx_lbw_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET);
	config->tx_lbw_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH);
	config->tx_hbw_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_LWM);
	config->tx_hbw_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET);
	config->tx_hbw_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH);
}
#else
static void hdd_ini_tx_flow_control(struct hdd_config *config,
				    struct wlan_objmgr_psoc *psoc)
{
}
#endif
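
/*
 * Illustrative note (an assumption inferred from the INI names, not taken
 * from this file; the key names and defaults shown are hypothetical): for
 * each bandwidth class the resume threshold is derived from the low
 * watermark plus the high-watermark offset, so with, e.g.,
 *
 *	TxFlowLowWaterMark=300
 *	TxFlowHighWaterMarkOffset=94
 *
 * tx flow control would pause a vdev when free tx descriptors drop to 300
 * and resume it once they climb back above roughly 300 + 94.
 */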

#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
/**
 * hdd_ini_bus_bandwidth() - Initialize INIs concerned with bus bandwidth
 * @config: pointer to hdd config
 * @psoc: pointer to psoc obj
 *
 * Return: none
 */
static void hdd_ini_bus_bandwidth(struct hdd_config *config,
				  struct wlan_objmgr_psoc *psoc)
{
	config->bus_bw_very_high_threshold =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_VERY_HIGH_THRESHOLD);
	config->bus_bw_high_threshold =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD);
	config->bus_bw_medium_threshold =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_MEDIUM_THRESHOLD);
	config->bus_bw_low_threshold =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_LOW_THRESHOLD);
	config->bus_bw_compute_interval =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL);
	config->bus_low_cnt_threshold =
		cfg_get(psoc, CFG_DP_BUS_LOW_BW_CNT_THRESHOLD);
}
2985
2986/**
2987 * hdd_ini_tx_flow_control() - Initialize INIs concerned about tcp settings
2988 * @config: pointer to hdd config
2989 * @psoc: pointer to psoc obj
2990 *
2991 * Return: none
2992 */
2993static void hdd_ini_tcp_settings(struct hdd_config *config,
2994 struct wlan_objmgr_psoc *psoc)
2995{
2996 config->enable_tcp_limit_output =
2997 cfg_get(psoc, CFG_DP_ENABLE_TCP_LIMIT_OUTPUT);
2998 config->enable_tcp_adv_win_scale =
2999 cfg_get(psoc, CFG_DP_ENABLE_TCP_ADV_WIN_SCALE);
3000 config->enable_tcp_delack =
3001 cfg_get(psoc, CFG_DP_ENABLE_TCP_DELACK);
3002 config->tcp_delack_thres_high =
3003 cfg_get(psoc, CFG_DP_TCP_DELACK_THRESHOLD_HIGH);
3004 config->tcp_delack_thres_low =
3005 cfg_get(psoc, CFG_DP_TCP_DELACK_THRESHOLD_LOW);
3006 config->tcp_delack_timer_count =
3007 cfg_get(psoc, CFG_DP_TCP_DELACK_TIMER_COUNT);
3008 config->tcp_tx_high_tput_thres =
3009 cfg_get(psoc, CFG_DP_TCP_TX_HIGH_TPUT_THRESHOLD);
Alok Kumar2fad6442018-11-08 19:19:28 +05303010 config->enable_tcp_param_update =
3011 cfg_get(psoc, CFG_DP_ENABLE_TCP_PARAM_UPDATE);
jitiphil869b9f72018-09-25 17:14:01 +05303012}
#else
static void hdd_ini_bus_bandwidth(struct hdd_config *config,
				  struct wlan_objmgr_psoc *psoc)
{
}

static void hdd_ini_tcp_settings(struct hdd_config *config,
				 struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_FEATURE_DP_BUS_BANDWIDTH */

/**
 * hdd_set_rx_mode_value() - set rx_mode values
 * @hdd_ctx: hdd context
 *
 * Return: none
 */
static void hdd_set_rx_mode_value(struct hdd_context *hdd_ctx)
{
	uint32_t rx_mode = hdd_ctx->config->rx_mode;
	enum QDF_GLOBAL_MODE con_mode;

	con_mode = hdd_get_conparam();

	/* RPS has higher priority than dynamic RPS when both bits are set */
	if (rx_mode & CFG_ENABLE_RPS && rx_mode & CFG_ENABLE_DYNAMIC_RPS)
		rx_mode &= ~CFG_ENABLE_DYNAMIC_RPS;

	if (rx_mode & CFG_ENABLE_RX_THREAD && rx_mode & CFG_ENABLE_RPS) {
		hdd_warn("invalid rx_mode configuration, using the default");
		rx_mode = CFG_RX_MODE_DEFAULT;
	}

	if (rx_mode & CFG_ENABLE_RX_THREAD)
		hdd_ctx->enable_rxthread = true;
	else if (rx_mode & CFG_ENABLE_DP_RX_THREADS) {
		/* DP RX threads are not used in monitor mode */
		if (con_mode == QDF_GLOBAL_MONITOR_MODE)
			hdd_ctx->enable_dp_rx_threads = false;
		else
			hdd_ctx->enable_dp_rx_threads = true;
	}

	if (rx_mode & CFG_ENABLE_RPS)
		hdd_ctx->rps = true;

	if (rx_mode & CFG_ENABLE_NAPI)
		hdd_ctx->napi_enable = true;

	if (rx_mode & CFG_ENABLE_DYNAMIC_RPS)
		hdd_ctx->dynamic_rps = true;

	hdd_debug("rx_mode:%u dp_rx_threads:%u rx_thread:%u napi:%u rps:%u dynamic rps %u",
		  rx_mode, hdd_ctx->enable_dp_rx_threads,
		  hdd_ctx->enable_rxthread, hdd_ctx->napi_enable,
		  hdd_ctx->rps, hdd_ctx->dynamic_rps);
}
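
/*
 * Illustrative sketch (the flag values are assumptions; the real masks
 * live in the DP cfg headers): rx_mode is a bitmask of the CFG_ENABLE_*
 * flags, so an INI line such as
 *
 *	rx_mode=5
 *
 * with, say, CFG_ENABLE_RX_THREAD == BIT(0) and CFG_ENABLE_NAPI == BIT(2)
 * would select the rx_thread + NAPI combination, while mutually exclusive
 * bits (rx_thread together with RPS) are rejected above and replaced with
 * CFG_RX_MODE_DEFAULT.
 */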

#ifdef CONFIG_DP_TRACE
static void
hdd_dp_dp_trace_cfg_update(struct hdd_config *config,
			   struct wlan_objmgr_psoc *psoc)
{
	qdf_size_t array_out_size;

	config->enable_dp_trace = cfg_get(psoc, CFG_DP_ENABLE_DP_TRACE);
	qdf_uint8_array_parse(cfg_get(psoc, CFG_DP_DP_TRACE_CONFIG),
			      config->dp_trace_config,
			      sizeof(config->dp_trace_config), &array_out_size);
}
#else
static void
hdd_dp_dp_trace_cfg_update(struct hdd_config *config,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif

#ifdef WLAN_NUD_TRACKING
static void
hdd_dp_nud_tracking_cfg_update(struct hdd_config *config,
			       struct wlan_objmgr_psoc *psoc)
{
	config->enable_nud_tracking = cfg_get(psoc, CFG_DP_ENABLE_NUD_TRACKING);
}
#else
static void
hdd_dp_nud_tracking_cfg_update(struct hdd_config *config,
			       struct wlan_objmgr_psoc *psoc)
{
}
#endif

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
static void hdd_ini_tcp_del_ack_settings(struct hdd_config *config,
					 struct wlan_objmgr_psoc *psoc)
{
	config->del_ack_threshold_high =
		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_HIGH_THRESHOLD);
	config->del_ack_threshold_low =
		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_LOW_THRESHOLD);
	config->del_ack_enable =
		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_ENABLE);
	config->del_ack_pkt_count =
		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_PKT_CNT);
	config->del_ack_timer_value =
		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_TIMER_VALUE);
}
#else
static void hdd_ini_tcp_del_ack_settings(struct hdd_config *config,
					 struct wlan_objmgr_psoc *psoc)
{
}
#endif

void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
		       struct hdd_context *hdd_ctx)
{
	struct hdd_config *config;
	qdf_size_t array_out_size;

	config = hdd_ctx->config;
	hdd_ini_tx_flow_control(config, psoc);
	hdd_ini_bus_bandwidth(config, psoc);
	hdd_ini_tcp_settings(config, psoc);
	hdd_ini_tcp_del_ack_settings(config, psoc);
	config->napi_cpu_affinity_mask =
		cfg_get(psoc, CFG_DP_NAPI_CE_CPU_MASK);
	config->rx_thread_affinity_mask =
		cfg_get(psoc, CFG_DP_RX_THREAD_CPU_MASK);
	qdf_uint8_array_parse(cfg_get(psoc, CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST),
			      config->cpu_map_list,
			      sizeof(config->cpu_map_list), &array_out_size);
	config->tx_orphan_enable = cfg_get(psoc, CFG_DP_TX_ORPHAN_ENABLE);
	config->rx_mode = cfg_get(psoc, CFG_DP_RX_MODE);
	hdd_set_rx_mode_value(hdd_ctx);
	config->multicast_replay_filter =
		cfg_get(psoc, CFG_DP_FILTER_MULTICAST_REPLAY);
	config->rx_wakelock_timeout =
		cfg_get(psoc, CFG_DP_RX_WAKELOCK_TIMEOUT);
	config->num_dp_rx_threads = cfg_get(psoc, CFG_DP_NUM_DP_RX_THREADS);
	config->cfg_wmi_credit_cnt = cfg_get(psoc, CFG_DP_HTC_WMI_CREDIT_CNT);
	hdd_dp_dp_trace_cfg_update(config, psoc);
	hdd_dp_nud_tracking_cfg_update(config, psoc);
}

/**
 * wlan_hdd_rx_rpm_mark_last_busy() - check if runtime PM should stay busy
 * @hdd_ctx: hdd context
 * @hif_ctx: hif context
 *
 * Return: true if DP RX was last busy within the runtime PM delay window
 */
bool wlan_hdd_rx_rpm_mark_last_busy(struct hdd_context *hdd_ctx,
				    void *hif_ctx)
{
	uint64_t duration_us, dp_rx_busy_us, current_us;
	uint32_t rpm_delay_ms;

	if (!hif_pm_runtime_is_dp_rx_busy(hif_ctx))
		return false;

	dp_rx_busy_us = hif_pm_runtime_get_dp_rx_busy_mark(hif_ctx);
	current_us = qdf_get_log_timestamp_usecs();

	/*
	 * Modular subtraction: current_us - dp_rx_busy_us with wraparound,
	 * i.e. the microseconds elapsed since DP RX was last marked busy.
	 */
	duration_us = (unsigned long)((ULONG_MAX - dp_rx_busy_us) +
				      current_us + 1);
	rpm_delay_ms = ucfg_pmo_get_runtime_pm_delay(hdd_ctx->psoc);

	return (duration_us / 1000) < rpm_delay_ms;
}
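
/*
 * Illustrative usage (a sketch of an assumed caller, not code from this
 * file): an RX completion path could use the helper above to keep runtime
 * PM from suspending the bus while DP RX traffic is still flowing:
 *
 *	if (wlan_hdd_rx_rpm_mark_last_busy(hdd_ctx, hif_ctx))
 *		hif_pm_runtime_mark_last_busy(hif_ctx);
 *
 * As a worked example of the wraparound arithmetic above: with
 * dp_rx_busy_us == ULONG_MAX - 10 and current_us == 5, duration_us
 * evaluates to 10 + 5 + 1 == 16, i.e. 16 us elapsed across the counter
 * wrap.
 */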