/*
 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wlan_hdd_tx_rx.c
 *
 * Linux HDD Tx/RX APIs
 */

/* denote that this file does not allow legacy hddLog */
#define HDD_DISALLOW_LEGACY_HDDLOG 1
#include "osif_sync.h"
#include <wlan_hdd_tx_rx.h>
#include <wlan_hdd_softap_tx_rx.h>
#include <wlan_hdd_napi.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/inetdevice.h>
#include <cds_sched.h>
#include <cds_utils.h>

#include <wlan_hdd_p2p.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include "sap_api.h"
#include "wlan_hdd_wmm.h"
#include "wlan_hdd_tdls.h"
#include "wlan_hdd_ocb.h"
#include "wlan_hdd_lro.h"
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_flow_ctrl_v2.h>
#include "wlan_hdd_nan_datapath.h"
#include "pld_common.h"
#include <cdp_txrx_misc.h>
#include "wlan_hdd_rx_monitor.h"
#include "wlan_hdd_power.h"
#include "wlan_hdd_cfg80211.h"
#include <wlan_hdd_tsf.h>
#include <net/tcp.h>
#include "wma_api.h"

#include "wlan_hdd_nud_tracking.h"
#include "dp_txrx.h"
#include "cfg_ucfg_api.h"
#include "target_type.h"
#include "wlan_hdd_object_manager.h"

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
 * Mapping of the Linux AC interpretation to SME AC.
 * The host has 5 tx queues: 4 flow-controlled queues for regular traffic and
 * one non-flow-controlled queue for high priority control traffic (EAPOL,
 * DHCP). The fifth queue is mapped to AC_VO to allow for proper
 * prioritization.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,
	SME_AC_VI,
	SME_AC_BE,
	SME_AC_BK,
	SME_AC_VO,
};

#else
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,
	SME_AC_VI,
	SME_AC_BE,
	SME_AC_BK,
};

#endif

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
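/**
 * hdd_register_hl_netdev_fc_timer() - Register HL netdev flow control timer
 * @adapter: adapter handle
 * @timer_callback: callback invoked when the flow control timer expires
 *
 * Initializes the adapter's tx_flow_control_timer once; subsequent calls
 * are no-ops until the timer is deregistered.
 *
 * Return: none
 */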
void hdd_register_hl_netdev_fc_timer(struct hdd_adapter *adapter,
				     qdf_mc_timer_callback_t timer_callback)
{
	if (!adapter->tx_flow_timer_initialized) {
		qdf_mc_timer_init(&adapter->tx_flow_control_timer,
				  QDF_TIMER_TYPE_SW, timer_callback, adapter);
		adapter->tx_flow_timer_initialized = true;
	}
}

/**
 * hdd_deregister_hl_netdev_fc_timer() - Deregister HL Flow Control Timer
 * @adapter: adapter handle
 *
 * Return: none
 */
void hdd_deregister_hl_netdev_fc_timer(struct hdd_adapter *adapter)
{
	if (adapter->tx_flow_timer_initialized) {
		qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
		qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
		adapter->tx_flow_timer_initialized = false;
	}
}

/**
 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
 * @adapter_context: pointer to vdev adapter
 *
 * Return: None
 */
void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	u32 p_qpaused;
	u32 np_qpaused;

	if (!adapter) {
		hdd_err("invalid adapter context");
		return;
	}

	hdd_debug("Enabling queues");
	spin_lock_bh(&adapter->pause_map_lock);
	p_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL_PRIORITY);
	np_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL);
	spin_unlock_bh(&adapter->pause_map_lock);

	if (p_qpaused) {
		wlan_hdd_netif_queue_control(adapter,
					     WLAN_NETIF_PRIORITY_QUEUE_ON,
					     WLAN_DATA_FLOW_CONTROL_PRIORITY);
		cdp_hl_fc_set_os_queue_status(soc,
					      adapter->vdev_id,
					      WLAN_NETIF_PRIORITY_QUEUE_ON);
	}
	if (np_qpaused) {
		wlan_hdd_netif_queue_control(adapter,
					     WLAN_WAKE_NON_PRIORITY_QUEUE,
					     WLAN_DATA_FLOW_CONTROL);
		cdp_hl_fc_set_os_queue_status(soc,
					      adapter->vdev_id,
					      WLAN_WAKE_NON_PRIORITY_QUEUE);
	}
}

#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
 * @adapter_context: pointer to vdev adapter
 *
 * If a blocked OS queue is not resumed within the timeout period, resume
 * the OS queue forcefully to prevent a permanent stall.
 *
 * Return: None
 */
void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;

	if (!adapter) {
		/* INVALID ARG */
		return;
	}

	hdd_debug("Enabling queues");
	wlan_hdd_netif_queue_control(adapter, WLAN_WAKE_ALL_NETIF_QUEUE,
				     WLAN_CONTROL_PATH);
}

/**
 * hdd_tx_resume_false() - disable OS TX queues when resume is not granted
 * @adapter: pointer to hdd adapter
 * @tx_resume: TX Q resume trigger
 *
 * If @tx_resume is false, pause the OS TX queues and arm the flow control
 * timer so that the queues are eventually resumed even without an event.
 *
 * Return: None
 */
static void
hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
{
	if (true == tx_resume)
		return;

	/* Pause TX */
	hdd_debug("Disabling queues");
	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
				     WLAN_DATA_FLOW_CONTROL);

	if (QDF_TIMER_STATE_STOPPED ==
	    qdf_mc_timer_get_current_state(&adapter->
					   tx_flow_control_timer)) {
		QDF_STATUS status;

		status = qdf_mc_timer_start(&adapter->tx_flow_control_timer,
					    WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

		if (!QDF_IS_STATUS_SUCCESS(status))
			hdd_err("Failed to start tx_flow_control_timer");
		else
			adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
	}

	adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
	adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
}

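/**
 * hdd_skb_orphan() - orphan an skb or unshare it before transmit
 * @adapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Orphans the skb (detaching it from the owning socket so the network
 * stack can queue more data) when the flow control watermark or the
 * tx_orphan_enable configuration calls for it; otherwise unshares a
 * possibly cloned skb.
 *
 * Return: pointer to skb structure
 */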
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
					     struct sk_buff *skb)
{
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	int need_orphan = 0;

	if (adapter->tx_flow_low_watermark > 0) {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
		/*
		 * The TCP TX throttling logic changed a little after the
		 * 3.19-rc1 kernel: the TCP sending limit became smaller,
		 * which throttles the TCP packets handed to the host driver
		 * and makes the TCP uplink throughput drop heavily. To fix
		 * this issue, orphan the socket buffer asap, which calls the
		 * skb's destructor to notify the TCP stack that the skb is
		 * unowned; the TCP stack then pumps more packets to the host
		 * driver.
		 *
		 * TX packets might still be dropped in the UDP case (e.g.
		 * iperf testing), so this path needs to be protected by
		 * flow control.
		 */
		need_orphan = 1;
#else
		if (hdd_ctx->config->tx_orphan_enable)
			need_orphan = 1;
#endif
	} else if (hdd_ctx->config->tx_orphan_enable) {
		if (qdf_nbuf_is_ipv4_tcp_pkt(skb) ||
		    qdf_nbuf_is_ipv6_tcp_pkt(skb))
			need_orphan = 1;
	}

	if (need_orphan) {
		skb_orphan(skb);
		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
	} else
		skb = skb_unshare(skb, GFP_ATOMIC);

	return skb;
}

/**
 * hdd_tx_resume_cb() - Resume OS TX Q.
 * @adapter_context: pointer to vdev adapter
 * @tx_resume: TX Q resume trigger
 *
 * Q was stopped due to WLAN TX path low resource condition
 *
 * Return: None
 */
void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;
	struct hdd_station_ctx *hdd_sta_ctx = NULL;

	if (!adapter) {
		/* INVALID ARG */
		return;
	}

	hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);

	/* Resume TX */
	if (true == tx_resume) {
		if (QDF_TIMER_STATE_STOPPED !=
		    qdf_mc_timer_get_current_state(&adapter->
						   tx_flow_control_timer)) {
			qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
		}
		hdd_debug("Enabling queues");
		wlan_hdd_netif_queue_control(adapter,
					     WLAN_WAKE_ALL_NETIF_QUEUE,
					     WLAN_DATA_FLOW_CONTROL);
		adapter->hdd_stats.tx_rx_stats.is_txflow_paused = false;
		adapter->hdd_stats.tx_rx_stats.txflow_unpause_cnt++;
	}
	hdd_tx_resume_false(adapter, tx_resume);
}

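/**
 * hdd_tx_flow_control_is_pause() - check if TX data queues are paused
 * @adapter_context: pointer to vdev adapter
 *
 * Return: true if the data-path flow control bit is set in the adapter's
 *         pause map, false otherwise (or on an invalid adapter)
 */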
bool hdd_tx_flow_control_is_pause(void *adapter_context)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		/* INVALID ARG */
		hdd_err("invalid adapter %pK", adapter);
		return false;
	}

	return adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL);
}

void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
			qdf_mc_timer_callback_t timer_callback,
			ol_txrx_tx_flow_control_fp flow_control_fp,
			ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause_fp)
{
	if (adapter->tx_flow_timer_initialized == false) {
		qdf_mc_timer_init(&adapter->tx_flow_control_timer,
				  QDF_TIMER_TYPE_SW,
				  timer_callback,
				  adapter);
		adapter->tx_flow_timer_initialized = true;
	}
	cdp_fc_register(cds_get_context(QDF_MODULE_ID_SOC),
			adapter->vdev_id, flow_control_fp, adapter,
			flow_control_is_pause_fp);
}

/**
 * hdd_deregister_tx_flow_control() - Deregister TX Flow control
 * @adapter: adapter handle
 *
 * Return: none
 */
void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
{
	cdp_fc_deregister(cds_get_context(QDF_MODULE_ID_SOC),
			  adapter->vdev_id);
	if (adapter->tx_flow_timer_initialized == true) {
		qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
		qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
		adapter->tx_flow_timer_initialized = false;
	}
}

/**
 * hdd_get_tx_resource() - check tx resources and take action
 * @adapter: adapter handle
 * @STAId: station id
 * @timer_value: timer value
 *
 * Return: none
 */
void hdd_get_tx_resource(struct hdd_adapter *adapter,
			 uint8_t STAId, uint16_t timer_value)
{
	if (false ==
	    cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC), STAId,
				   adapter->tx_flow_low_watermark,
				   adapter->tx_flow_hi_watermark_offset)) {
		hdd_debug("Disabling queues lwm %d hwm offset %d",
			  adapter->tx_flow_low_watermark,
			  adapter->tx_flow_hi_watermark_offset);
		wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
					     WLAN_DATA_FLOW_CONTROL);
		if ((adapter->tx_flow_timer_initialized == true) &&
		    (QDF_TIMER_STATE_STOPPED ==
		     qdf_mc_timer_get_current_state(&adapter->
						    tx_flow_control_timer))) {
			qdf_mc_timer_start(&adapter->tx_flow_control_timer,
					   timer_value);
			adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
			adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
			adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
		}
	}
}

#else
/**
 * hdd_skb_orphan() - skb_unshare a cloned packet, else skb_orphan
 * @adapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Return: pointer to skb structure
 */
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
					     struct sk_buff *skb)
{
	struct sk_buff *nskb;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
#endif

	hdd_skb_fill_gso_size(adapter->dev, skb);

	nskb = skb_unshare(skb, GFP_ATOMIC);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
	if (unlikely(hdd_ctx->config->tx_orphan_enable) && (nskb == skb)) {
		/*
		 * For UDP packets we want to orphan the packet to allow the app
		 * to send more packets. The flow would ultimately be controlled
		 * by the limited number of tx descriptors for the vdev.
		 */
		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
		skb_orphan(skb);
	}
#endif
	return nskb;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

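/**
 * hdd_txrx_get_tx_ack_count() - get the TX ack count for an adapter
 * @adapter: adapter handle
 *
 * Return: TX ack count reported by the CDP layer for this vdev
 */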
uint32_t hdd_txrx_get_tx_ack_count(struct hdd_adapter *adapter)
{
	return cdp_get_tx_ack_stats(cds_get_context(QDF_MODULE_ID_SOC),
				    adapter->vdev_id);
}

#ifdef FEATURE_WLAN_DIAG_SUPPORT
/**
 * hdd_event_eapol_log() - send EAPOL event to wlan diag
 * @skb: skb ptr
 * @dir: direction
 *
 * Return: None
 */
void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
{
	int16_t eapol_key_info;

	WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);

	if ((dir == QDF_TX &&
	     (QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
	      QDF_NBUF_CB_GET_PACKET_TYPE(skb))))
		return;
	else if (!qdf_nbuf_is_ipv4_eapol_pkt(skb))
		return;

	eapol_key_info = (uint16_t)(*(uint16_t *)
				    (skb->data + EAPOL_KEY_INFO_OFFSET));

	wlan_diag_event.event_sub_type =
		(dir == QDF_TX ?
		 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
		 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
	wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
		(skb->data + EAPOL_PACKET_TYPE_OFFSET));
	wlan_diag_event.eapol_key_info = eapol_key_info;
	wlan_diag_event.eapol_rate = 0;
	qdf_mem_copy(wlan_diag_event.dest_addr,
		     (skb->data + QDF_NBUF_DEST_MAC_OFFSET),
		     sizeof(wlan_diag_event.dest_addr));
	qdf_mem_copy(wlan_diag_event.src_addr,
		     (skb->data + QDF_NBUF_SRC_MAC_OFFSET),
		     sizeof(wlan_diag_event.src_addr));

	WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
}
#endif /* FEATURE_WLAN_DIAG_SUPPORT */

/**
 * wlan_hdd_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Return: none
 */
void wlan_hdd_classify_pkt(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	qdf_mem_zero(skb->cb, sizeof(skb->cb));

	/* check destination mac address is broadcast/multicast */
	if (is_broadcast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
	else if (is_multicast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_MCAST(skb) = true;

	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ARP;
	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_DHCP;
	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_WAPI;
	else if (qdf_nbuf_is_icmp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMP;
	else if (qdf_nbuf_is_icmpv6_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMPv6;
}

/**
 * hdd_get_transmit_sta_id() - retrieve the station id to be used for
 * sending traffic towards a particular destination address. The destination
 * address can be unicast, multicast or broadcast.
 * @adapter: Handle to adapter context
 * @skb: sk buff carrying the destination address
 * @station_id: station id
 *
 * Return: None
 */
static void hdd_get_transmit_sta_id(struct hdd_adapter *adapter,
				    struct sk_buff *skb, uint8_t *station_id)
{
	bool mcbc_addr = false;
	QDF_STATUS status;
	struct hdd_station_ctx *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
	struct qdf_mac_addr *dst_addr = NULL;

	dst_addr = (struct qdf_mac_addr *)skb->data;
	status = hdd_get_peer_sta_id(sta_ctx, dst_addr, station_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		if (QDF_NBUF_CB_GET_IS_BCAST(skb) ||
		    QDF_NBUF_CB_GET_IS_MCAST(skb)) {
			hdd_debug("Received MC/BC packet for transmission");
			mcbc_addr = true;
		}
	}

	if (adapter->device_mode == QDF_IBSS_MODE ||
	    adapter->device_mode == QDF_NDI_MODE) {
		/*
		 * This check is necessary to make sure station id is not
		 * overwritten for UC traffic in IBSS or NDI mode
		 */
		if (mcbc_addr)
			*station_id = sta_ctx->broadcast_sta_id;
	} else {
		/* For the rest, traffic is directed to AP/P2P GO */
		if (eConnectionState_Associated == sta_ctx->conn_info.conn_state)
			*station_id = sta_ctx->conn_info.sta_id[0];
	}
}

/**
 * hdd_clear_tx_rx_connectivity_stats() - clear connectivity stats
 * @adapter: pointer to HDD adapter
 *
 * Return: None
 */
static void hdd_clear_tx_rx_connectivity_stats(struct hdd_adapter *adapter)
{
	hdd_info("Clear txrx connectivity stats");
	qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
		     sizeof(adapter->hdd_stats.hdd_arp_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_dns_stats,
		     sizeof(adapter->hdd_stats.hdd_dns_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats,
		     sizeof(adapter->hdd_stats.hdd_tcp_stats));
	qdf_mem_zero(&adapter->hdd_stats.hdd_icmpv4_stats,
		     sizeof(adapter->hdd_stats.hdd_icmpv4_stats));
	adapter->pkt_type_bitmap = 0;
	adapter->track_arp_ip = 0;
	qdf_mem_zero(adapter->dns_payload, adapter->track_dns_domain_len);
	adapter->track_dns_domain_len = 0;
	adapter->track_src_port = 0;
	adapter->track_dest_port = 0;
	adapter->track_dest_ipv4 = 0;
}

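/**
 * hdd_reset_all_adapters_connectivity_stats() - clear connectivity stats
 * for all adapters
 * @hdd_ctx: pointer to HDD context
 *
 * Walks the adapter list and clears each adapter's connectivity
 * tracking statistics.
 *
 * Return: None
 */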
void hdd_reset_all_adapters_connectivity_stats(struct hdd_context *hdd_ctx)
{
	struct hdd_adapter *adapter = NULL, *next = NULL;
	QDF_STATUS status;

	hdd_enter();

	status = hdd_get_front_adapter(hdd_ctx, &adapter);

	while (adapter && QDF_STATUS_SUCCESS == status) {
		hdd_clear_tx_rx_connectivity_stats(adapter);
		status = hdd_get_next_adapter(hdd_ctx, adapter, &next);
		adapter = next;
	}

	hdd_exit();
}

/**
 * hdd_is_tx_allowed() - check if Tx is allowed based on current peer state
 * @skb: pointer to OS packet (sk_buff)
 * @peer_id: Peer STA ID in peer table
 *
 * This function gets the peer state from DP and checks whether it is
 * OL_TXRX_PEER_STATE_CONN or OL_TXRX_PEER_STATE_AUTH. Only EAPOL/WAPI
 * packets are allowed when peer_state is OL_TXRX_PEER_STATE_CONN. All
 * packets are allowed when peer_state is OL_TXRX_PEER_STATE_AUTH.
 *
 * Return: true if Tx is allowed and false otherwise.
 */
static inline bool hdd_is_tx_allowed(struct sk_buff *skb, uint8_t peer_id)
{
	enum ol_txrx_peer_state peer_state;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	void *peer;

	QDF_BUG(soc);
	QDF_BUG(pdev);

	peer = cdp_peer_find_by_local_id(soc, pdev, peer_id);

	if (!peer) {
		hdd_err_rl("Unable to find peer entry for sta_id: %d", peer_id);
		return false;
	}

	peer_state = cdp_peer_state_get(soc, peer);
	if (likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
		return true;
	if (OL_TXRX_PEER_STATE_CONN == peer_state &&
	    (ntohs(skb->protocol) == HDD_ETHERTYPE_802_1_X
	     || IS_HDD_ETHERTYPE_WAI(skb)))
		return true;
	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("Invalid peer state for Tx: %d"), peer_state);
	return false;
}

/**
 * hdd_tx_rx_is_dns_domain_name_match() - check whether the DNS domain name
 * in the received skb matches the tracked DNS domain name
 * @skb: pointer to skb
 * @adapter: pointer to adapter
 *
 * Return: true if it matches, else false
 */
static bool hdd_tx_rx_is_dns_domain_name_match(struct sk_buff *skb,
					       struct hdd_adapter *adapter)
{
	uint8_t *domain_name;

	if (adapter->track_dns_domain_len == 0)
		return false;

	/* OOB check: make sure strncmp does not read past skb->len */
	if ((adapter->track_dns_domain_len +
	     QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET) > qdf_nbuf_len(skb))
		return false;

	domain_name = qdf_nbuf_get_dns_domain_name(skb,
						   adapter->track_dns_domain_len);
	if (strncmp(domain_name, adapter->dns_payload,
		    adapter->track_dns_domain_len) == 0)
		return true;
	else
		return false;
}

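/**
 * hdd_tx_rx_collect_connectivity_stats_info() - collect connectivity stats
 * @skb: pointer to skb data packet
 * @context: adapter context (opaque pointer to struct hdd_adapter)
 * @action: stage of the packet (request, response, tx drop, rx deliver, ...)
 * @pkt_type: in/out classification of the tracked packet type
 *
 * Updates the per-adapter ICMPv4/TCP/DNS connectivity tracking counters
 * for the given packet and action.
 *
 * Return: None
 */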
void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
			void *context,
			enum connectivity_stats_pkt_status action,
			uint8_t *pkt_type)
{
	uint32_t pkt_type_bitmap;
	struct hdd_adapter *adapter = NULL;

	adapter = (struct hdd_adapter *)context;
	if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Magic cookie(%x) for adapter sanity verification is invalid",
			  adapter->magic);
		return;
	}

	/* ARP tracking is done already. */
	pkt_type_bitmap = adapter->pkt_type_bitmap;
	pkt_type_bitmap &= ~CONNECTIVITY_CHECK_SET_ARP;

	if (!pkt_type_bitmap)
		return;

	switch (action) {
	case PKT_TYPE_REQ:
	case PKT_TYPE_TX_HOST_FW_SENT:
		if (qdf_nbuf_is_icmp_pkt(skb)) {
			if (qdf_nbuf_data_is_icmpv4_req(skb) &&
			    (adapter->track_dest_ipv4 ==
			     qdf_nbuf_get_icmpv4_tgt_ip(skb))) {
				*pkt_type = CONNECTIVITY_CHECK_SET_ICMPV4;
				if (action == PKT_TYPE_REQ) {
					++adapter->hdd_stats.hdd_icmpv4_stats.
							tx_icmpv4_req_count;
					QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s : ICMPv4 Req packet",
						  __func__);
				} else
					/* host receives tx completion */
					++adapter->hdd_stats.hdd_icmpv4_stats.
								tx_host_fw_sent;
			}
		} else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
			if (qdf_nbuf_data_is_tcp_syn(skb) &&
			    (adapter->track_dest_port ==
			     qdf_nbuf_data_get_tcp_dst_port(skb))) {
				*pkt_type = CONNECTIVITY_CHECK_SET_TCP_SYN;
				if (action == PKT_TYPE_REQ) {
					++adapter->hdd_stats.hdd_tcp_stats.
							tx_tcp_syn_count;
					QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s : TCP Syn packet",
						  __func__);
				} else
					/* host receives tx completion */
					++adapter->hdd_stats.hdd_tcp_stats.
						tx_tcp_syn_host_fw_sent;
			} else if ((adapter->hdd_stats.hdd_tcp_stats.
				    is_tcp_syn_ack_rcv || adapter->hdd_stats.
					hdd_tcp_stats.is_tcp_ack_sent) &&
				   qdf_nbuf_data_is_tcp_ack(skb) &&
				   (adapter->track_dest_port ==
				    qdf_nbuf_data_get_tcp_dst_port(skb))) {
				*pkt_type = CONNECTIVITY_CHECK_SET_TCP_ACK;
				if (action == PKT_TYPE_REQ &&
				    adapter->hdd_stats.hdd_tcp_stats.
							is_tcp_syn_ack_rcv) {
					++adapter->hdd_stats.hdd_tcp_stats.
							tx_tcp_ack_count;
					adapter->hdd_stats.hdd_tcp_stats.
						is_tcp_syn_ack_rcv = false;
					adapter->hdd_stats.hdd_tcp_stats.
						is_tcp_ack_sent = true;
					QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s : TCP Ack packet",
						  __func__);
				} else if (action == PKT_TYPE_TX_HOST_FW_SENT &&
					   adapter->hdd_stats.hdd_tcp_stats.
							is_tcp_ack_sent) {
					/* host receives tx completion */
					++adapter->hdd_stats.hdd_tcp_stats.
							tx_tcp_ack_host_fw_sent;
					adapter->hdd_stats.hdd_tcp_stats.
							is_tcp_ack_sent = false;
				}
			}
		} else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
			if (qdf_nbuf_data_is_dns_query(skb) &&
			    hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
				*pkt_type = CONNECTIVITY_CHECK_SET_DNS;
				if (action == PKT_TYPE_REQ) {
					++adapter->hdd_stats.hdd_dns_stats.
							tx_dns_req_count;
					QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s : DNS query packet",
						  __func__);
				} else
					/* host receives tx completion */
					++adapter->hdd_stats.hdd_dns_stats.
								tx_host_fw_sent;
			}
		}
		break;

	case PKT_TYPE_RSP:
		if (qdf_nbuf_is_icmp_pkt(skb)) {
			if (qdf_nbuf_data_is_icmpv4_rsp(skb) &&
			    (adapter->track_dest_ipv4 ==
			     qdf_nbuf_get_icmpv4_src_ip(skb))) {
				++adapter->hdd_stats.hdd_icmpv4_stats.
							rx_icmpv4_rsp_count;
				*pkt_type =
					CONNECTIVITY_CHECK_SET_ICMPV4;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s : ICMPv4 Res packet", __func__);
			}
		} else if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
			if (qdf_nbuf_data_is_tcp_syn_ack(skb) &&
			    (adapter->track_dest_port ==
			     qdf_nbuf_data_get_tcp_src_port(skb))) {
				++adapter->hdd_stats.hdd_tcp_stats.
							rx_tcp_syn_ack_count;
				adapter->hdd_stats.hdd_tcp_stats.
					is_tcp_syn_ack_rcv = true;
				*pkt_type =
					CONNECTIVITY_CHECK_SET_TCP_SYN_ACK;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s : TCP Syn ack packet", __func__);
			}
		} else if (qdf_nbuf_is_ipv4_udp_pkt(skb)) {
			if (qdf_nbuf_data_is_dns_response(skb) &&
			    hdd_tx_rx_is_dns_domain_name_match(skb, adapter)) {
				++adapter->hdd_stats.hdd_dns_stats.
							rx_dns_rsp_count;
				*pkt_type = CONNECTIVITY_CHECK_SET_DNS;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s : DNS response packet", __func__);
			}
		}
		break;

	case PKT_TYPE_TX_DROPPED:
		switch (*pkt_type) {
		case CONNECTIVITY_CHECK_SET_ICMPV4:
			++adapter->hdd_stats.hdd_icmpv4_stats.tx_dropped;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : ICMPv4 Req packet dropped", __func__);
			break;
		case CONNECTIVITY_CHECK_SET_TCP_SYN:
			++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_dropped;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : TCP syn packet dropped", __func__);
			break;
		case CONNECTIVITY_CHECK_SET_TCP_ACK:
			++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_dropped;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : TCP ack packet dropped", __func__);
			break;
		case CONNECTIVITY_CHECK_SET_DNS:
			++adapter->hdd_stats.hdd_dns_stats.tx_dropped;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : DNS query packet dropped", __func__);
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_RX_DELIVERED:
		switch (*pkt_type) {
		case CONNECTIVITY_CHECK_SET_ICMPV4:
			++adapter->hdd_stats.hdd_icmpv4_stats.rx_delivered;
			break;
		case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
			++adapter->hdd_stats.hdd_tcp_stats.rx_delivered;
			break;
		case CONNECTIVITY_CHECK_SET_DNS:
			++adapter->hdd_stats.hdd_dns_stats.rx_delivered;
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_RX_REFUSED:
		switch (*pkt_type) {
		case CONNECTIVITY_CHECK_SET_ICMPV4:
			++adapter->hdd_stats.hdd_icmpv4_stats.rx_refused;
			break;
		case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
			++adapter->hdd_stats.hdd_tcp_stats.rx_refused;
			break;
		case CONNECTIVITY_CHECK_SET_DNS:
			++adapter->hdd_stats.hdd_dns_stats.rx_refused;
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_TX_ACK_CNT:
		switch (*pkt_type) {
		case CONNECTIVITY_CHECK_SET_ICMPV4:
			++adapter->hdd_stats.hdd_icmpv4_stats.tx_ack_cnt;
			break;
		case CONNECTIVITY_CHECK_SET_TCP_SYN:
			++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_ack_cnt;
			break;
		case CONNECTIVITY_CHECK_SET_TCP_ACK:
			++adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_ack_cnt;
			break;
		case CONNECTIVITY_CHECK_SET_DNS:
			++adapter->hdd_stats.hdd_dns_stats.tx_ack_cnt;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * __hdd_hard_start_xmit() - Transmit a frame
 * @skb: pointer to OS packet (sk_buff)
 * @dev: pointer to network device
 *
 * Function registered with the Linux OS for transmitting
 * packets. This version of the function directly passes
 * the packet to the Transport Layer.
 * In case of any packet drop or error, log the error with
 * INFO HIGH/LOW/MEDIUM to avoid excessive logging in kmsg.
 *
 * Return: None
 */
static void __hdd_hard_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	QDF_STATUS status;
	sme_ac_enum_type ac;
	enum sme_qos_wmmuptype up;
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	bool granted;
	uint8_t STAId;
	struct hdd_station_ctx *sta_ctx = &adapter->session.station;
	struct qdf_mac_addr *mac_addr;
	uint8_t pkt_type = 0;
	bool is_arp = false;
	struct wlan_objmgr_vdev *vdev;

#ifdef QCA_WIFI_FTM
	if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
		kfree_skb(skb);
		return;
	}
#endif

	++adapter->hdd_stats.tx_rx_stats.tx_called;
	adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;

	if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
	    cds_is_load_or_unload_in_progress()) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Recovery/(Un)load in progress, dropping the packet");
		goto drop_pkt;
	}

	wlan_hdd_classify_pkt(skb);
	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) == QDF_NBUF_CB_PACKET_TYPE_ARP) {
		is_arp = true;
		if (qdf_nbuf_data_is_arp_req(skb) &&
		    (adapter->track_arp_ip == qdf_nbuf_get_arp_tgt_ip(skb))) {
			++adapter->hdd_stats.hdd_arp_stats.tx_arp_req_count;
			QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s : ARP packet", __func__);
		}
	}
	/* track connectivity stats */
	if (adapter->pkt_type_bitmap)
		hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
							  PKT_TYPE_REQ,
							  &pkt_type);

	if (cds_is_driver_recovering()) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_WARN,
			  "Recovery in progress, dropping the packet");
		goto drop_pkt;
	}

	STAId = HDD_WLAN_INVALID_STA_ID;

	hdd_get_transmit_sta_id(adapter, skb, &STAId);
	if (STAId >= WLAN_MAX_STA_COUNT) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Invalid station id, transmit operation suspended");
		goto drop_pkt;
	}

	hdd_get_tx_resource(adapter, STAId,
			    WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);

	/* Get TL AC corresponding to Qdisc queue index/AC. */
	ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];

	if (!qdf_nbuf_ipa_owned_get(skb)) {
		skb = hdd_skb_orphan(adapter, skb);
		if (!skb)
			goto drop_pkt_accounting;
	}

	/*
	 * Add SKB to internal tracking table before further processing
	 * in WLAN driver.
	 */
	qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__);

	/*
	 * user priority from IP header, which is already extracted and set
	 * from the select_queue call back function
	 */
	up = skb->priority;

	++adapter->hdd_stats.tx_rx_stats.tx_classified_ac[ac];
#ifdef HDD_WMM_DEBUG
	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Classified as ac %d up %d", __func__, ac, up);
#endif /* HDD_WMM_DEBUG */

	if (HDD_PSB_CHANGED == adapter->psb_changed) {
		/*
		 * Function which determines whether acquiring admittance
		 * for a WMM AC is required or not, based on the psb
		 * configuration done in the framework
		 */
		hdd_wmm_acquire_access_required(adapter, ac);
	}
	/*
	 * Make sure we already have access to this access category
	 * or it is an EAPOL or WAPI frame during initial authentication which
	 * can have an artificially boosted higher qos priority.
	 */

	if (((adapter->psb_changed & (1 << ac)) &&
	     likely(adapter->hdd_wmm_status.ac_status[ac].
			is_access_allowed)) ||
	    ((sta_ctx->conn_info.is_authenticated == false) &&
	     (QDF_NBUF_CB_PACKET_TYPE_EAPOL ==
	      QDF_NBUF_CB_GET_PACKET_TYPE(skb) ||
	      QDF_NBUF_CB_PACKET_TYPE_WAPI ==
	      QDF_NBUF_CB_GET_PACKET_TYPE(skb)))) {
		granted = true;
	} else {
		status = hdd_wmm_acquire_access(adapter, ac, &granted);
		adapter->psb_changed |= (1 << ac);
	}

	if (!granted) {
		bool isDefaultAc = false;
		/*
		 * ADDTS request for this AC is sent, for now
		 * send this packet through next available lower
		 * Access category until ADDTS negotiation completes.
		 */
		while (!likely
			       (adapter->hdd_wmm_status.ac_status[ac].
				is_access_allowed)) {
			switch (ac) {
			case SME_AC_VO:
				ac = SME_AC_VI;
				up = SME_QOS_WMM_UP_VI;
				break;
			case SME_AC_VI:
				ac = SME_AC_BE;
				up = SME_QOS_WMM_UP_BE;
				break;
			case SME_AC_BE:
				ac = SME_AC_BK;
				up = SME_QOS_WMM_UP_BK;
				break;
			default:
				ac = SME_AC_BK;
				up = SME_QOS_WMM_UP_BK;
				isDefaultAc = true;
				break;
			}
			if (isDefaultAc)
				break;
		}
		skb->priority = up;
		skb->queue_mapping = hdd_linux_up_to_ac_map[up];
	}

	adapter->stats.tx_bytes += skb->len;

	mac_addr = (struct qdf_mac_addr *)skb->data;

	vdev = hdd_objmgr_get_vdev(adapter);
	if (vdev) {
		ucfg_tdls_update_tx_pkt_cnt(vdev, mac_addr);
		hdd_objmgr_put_vdev(vdev);
	}

	if (qdf_nbuf_is_tso(skb))
		adapter->stats.tx_packets += qdf_nbuf_get_tso_num_seg(skb);
	else
		++adapter->stats.tx_packets;

	hdd_event_eapol_log(skb, QDF_TX);
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);

	qdf_dp_trace_set_track(skb, QDF_TX);

	DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(skb),
			     sizeof(qdf_nbuf_data(skb)),
			     QDF_TX));

	if (!hdd_is_tx_allowed(skb, STAId)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  FL("Tx not allowed for sta_id: %d"), STAId);
		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
		goto drop_pkt_and_release_skb;
	}

	/* check whether need to linearize skb, like non-linear udp data */
	if (hdd_skb_nontso_linearize(skb) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
			  QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: skb %pK linearize failed. drop the pkt",
			  __func__, skb);
		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
		goto drop_pkt_and_release_skb;
	}

	/*
	 * If a transmit function is not registered, drop packet
	 */
	if (!adapter->tx_fn) {
		QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: TX function not registered by the data path",
			  __func__);
		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
		goto drop_pkt_and_release_skb;
	}

	if (adapter->tx_fn(adapter->txrx_vdev,
			   (qdf_nbuf_t)skb) != NULL) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: Failed to send packet to txrx for sta_id: %d",
			  __func__, STAId);
		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
		goto drop_pkt_and_release_skb;
	}

	netif_trans_update(dev);

	return;

drop_pkt_and_release_skb:
	qdf_net_buf_debug_release_skb(skb);
drop_pkt:

	/* track connectivity stats */
	if (adapter->pkt_type_bitmap)
		hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
							  PKT_TYPE_TX_DROPPED,
							  &pkt_type);
	qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
			      QDF_DP_TRACE_DROP_PACKET_RECORD, 0,
			      QDF_TX);
	kfree_skb(skb);

drop_pkt_accounting:

	++adapter->stats.tx_dropped;
	++adapter->hdd_stats.tx_rx_stats.tx_dropped;
	if (is_arp) {
		++adapter->hdd_stats.hdd_arp_stats.tx_dropped;
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s : ARP packet dropped", __func__);
	}
}

/**
 * hdd_hard_start_xmit() - Wrapper function to protect
 * __hdd_hard_start_xmit from SSR
 * @skb: pointer to OS packet
 * @net_dev: pointer to net_device structure
 *
 * Function called by OS if any packet needs to transmit.
 *
 * Return: Always returns NETDEV_TX_OK
 */
netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	struct osif_vdev_sync *vdev_sync;

	if (osif_vdev_sync_op_start(net_dev, &vdev_sync))
		return NETDEV_TX_OK;

	__hdd_hard_start_xmit(skb, net_dev);

	osif_vdev_sync_op_stop(vdev_sync);

	return NETDEV_TX_OK;
}

QDF_STATUS hdd_get_peer_sta_id(struct hdd_station_ctx *sta_ctx,
			       struct qdf_mac_addr *mac_address,
			       uint8_t *sta_id)
{
	uint8_t idx;

	for (idx = 0; idx < MAX_PEERS; idx++) {
		if (!qdf_mem_cmp(&sta_ctx->conn_info.peer_macaddr[idx],
				 mac_address, QDF_MAC_ADDR_SIZE)) {
			*sta_id = sta_ctx->conn_info.sta_id[idx];
			return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

1199/**
1200 * __hdd_tx_timeout() - TX timeout handler
1201 * @dev: pointer to network device
1202 *
1203 * This function is registered as a netdev ndo_tx_timeout method, and
1204 * is invoked by the kernel if the driver takes too long to transmit a
1205 * frame.
1206 *
1207 * Return: None
1208 */
1209static void __hdd_tx_timeout(struct net_device *dev)
1210{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001211 struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07001212 struct hdd_context *hdd_ctx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001213 struct netdev_queue *txq;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301214 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1215 u64 diff_jiffies;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001216 int i = 0;
1217
Rakshith Suresh Patkar5e1fdee2019-04-03 12:07:07 +05301218 hdd_ctx = WLAN_HDD_GET_CTX(adapter);
1219
1220 if (hdd_ctx->hdd_wlan_suspended) {
1221 hdd_debug("Device is suspended, ignore WD timeout");
1222 return;
1223 }
1224
Dustin Browne0024fa2016-10-14 16:29:21 -07001225 TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301226 DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_HDD_TX_TIMEOUT,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001227 QDF_TRACE_DEFAULT_PDEV_ID,
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301228 NULL, 0, QDF_TX));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001229
1230 /* Getting here implies we disabled the TX queues for too
1231 * long. Queues are disabled either because of disassociation
1232 * or low resource scenarios. In case of disassociation it is
1233 * ok to ignore this. But if associated, we have do possible
1234 * recovery here
1235 */
1236
1237 for (i = 0; i < NUM_TX_QUEUES; i++) {
1238 txq = netdev_get_tx_queue(dev, i);
Sravan Kumar Kairam887e89e2018-11-01 09:30:38 +05301239 hdd_info("Queue: %d status: %d txq->trans_start: %lu",
1240 i, netif_tx_queue_stopped(txq), txq->trans_start);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001241 }
1242
Sravan Kumar Kairam887e89e2018-11-01 09:30:38 +05301243 hdd_info("carrier state: %d", netif_carrier_ok(dev));
1244
Mohit Khannaca4173b2017-09-12 21:52:19 -07001245 wlan_hdd_display_netif_queue_history(hdd_ctx,
1246 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Leo Changfdb45c32016-10-28 11:09:23 -07001247 cdp_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301248
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001249 ++adapter->hdd_stats.tx_rx_stats.tx_timeout_cnt;
1250 ++adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301251
1252 diff_jiffies = jiffies -
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001253 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301254
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001255 if ((adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt > 1) &&
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301256 (diff_jiffies > (HDD_TX_TIMEOUT * 2))) {
1257 /*
1258 * In case when there is no traffic is running, it may
1259 * possible tx time-out may once happen and later system
1260 * recovered then continuous tx timeout count has to be
1261 * reset as it is gets modified only when traffic is running.
1262 * If over a period of time if this count reaches to threshold
1263 * then host triggers a false subsystem restart. In genuine
1264 * time out case kernel will call the tx time-out back to back
1265 * at interval of HDD_TX_TIMEOUT. Here now check if previous
1266 * TX TIME out has occurred more than twice of HDD_TX_TIMEOUT
1267 * back then host may recovered here from data stall.
1268 */
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001269 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301270 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnson9a27ffa2018-05-06 17:26:57 -07001271 "Reset continuous tx timeout stat");
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301272 }
1273
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001274 adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout = jiffies;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301275
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001276 if (adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt >
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301277 HDD_TX_STALL_THRESHOLD) {
1278 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1279 "Data stall due to continuous TX timeouts");
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001280 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
jitiphil377bcc12018-10-05 19:46:08 +05301281 if (cdp_cfg_get(soc, cfg_dp_enable_data_stall))
Poddar, Siddarth37033032017-10-11 15:47:40 +05301282 cdp_post_data_stall_event(soc,
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +05301283 DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
1284 DATA_STALL_LOG_HOST_STA_TX_TIMEOUT,
1285 0xFF, 0xFF,
1286 DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
1287 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001288}
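
/*
 * Illustrative sketch (not built): the stall heuristic above in
 * isolation. The thresholds mirror the HDD_TX_TIMEOUT and
 * HDD_TX_STALL_THRESHOLD usage above; the helper itself is hypothetical.
 */
#if 0
static bool example_is_data_stall(u64 now, u64 last_timeout,
				  uint32_t *cont_cnt)
{
	++(*cont_cnt);

	/* an isolated timeout followed by recovery resets the streak */
	if (*cont_cnt > 1 && (now - last_timeout) > (HDD_TX_TIMEOUT * 2))
		*cont_cnt = 0;

	/* back-to-back timeouts beyond the threshold indicate a stall */
	return *cont_cnt > HDD_TX_STALL_THRESHOLD;
}
#endif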

/**
 * hdd_tx_timeout() - Wrapper function to protect __hdd_tx_timeout from SSR
 * @net_dev: pointer to net_device structure
 *
 * Function called by the OS if there is any timeout during transmission.
 * Since HDD simply enqueues the packet and returns control to the OS
 * right away, in normal operation this should never be invoked.
 *
 * Return: none
 */
void hdd_tx_timeout(struct net_device *net_dev)
{
	struct osif_vdev_sync *vdev_sync;

	if (osif_vdev_sync_op_start(net_dev, &vdev_sync))
		return;

	__hdd_tx_timeout(net_dev);

	osif_vdev_sync_op_stop(vdev_sync);
}

/**
 * hdd_init_tx_rx() - Initialize Tx/RX module
 * @adapter: pointer to adapter context
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!adapter) {
		hdd_err("adapter is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	return status;
}

/**
 * hdd_deinit_tx_rx() - Deinitialize Tx/RX module
 * @adapter: pointer to adapter context
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 *	   QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter)
{
	QDF_BUG(adapter);
	if (!adapter)
		return QDF_STATUS_E_FAILURE;

	adapter->txrx_vdev = NULL;
	adapter->tx_fn = NULL;

	return QDF_STATUS_SUCCESS;
}

#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * hdd_mon_rx_packet_cbk() - Receive callback registered with the OL layer
 * @context: [in] pointer to qdf context
 * @rxbuf: [in] pointer to rx qdf_nbuf
 *
 * TL will call this to notify the HDD when one or more packets were
 * received for a registered STA.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered, QDF_STATUS_SUCCESS
 * otherwise
 */
static QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
{
	struct hdd_adapter *adapter;
	int rxstat;
	struct sk_buff *skb;
	struct sk_buff *skb_next;
	unsigned int cpu_index;

	/* Sanity check on inputs */
	if ((!context) || (!rxbuf)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)context;
	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "invalid adapter %pK", adapter);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = wlan_hdd_get_cpu();

	/* walk the chain until all are processed */
	skb = (struct sk_buff *)rxbuf;
	while (skb) {
		skb_next = skb->next;
		skb->dev = adapter->dev;

		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
		++adapter->stats.rx_packets;
		adapter->stats.rx_bytes += skb->len;

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(skb);

		/*
		 * If this is not the last packet on the chain, just put the
		 * packet into the backlog queue without scheduling the RX
		 * softirq.
		 */
		if (skb->next) {
			rxstat = netif_rx(skb);
		} else {
			/*
			 * This is the last packet on the chain, so schedule
			 * the RX softirq.
			 */
			rxstat = netif_rx_ni(skb);
		}

		if (NET_RX_SUCCESS == rxstat)
			++adapter->
				hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
		else
			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];

		skb = skb_next;
	}

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * hdd_get_peer_idx() - Get the idx for given address in peer table
 * @sta_ctx: pointer to HDD Station Context
 * @addr: pointer to Peer Mac address
 *
 * Return: index when success else INVALID_PEER_IDX
 */
int hdd_get_peer_idx(struct hdd_station_ctx *sta_ctx,
		     struct qdf_mac_addr *addr)
{
	uint8_t idx;

	for (idx = 0; idx < MAX_PEERS; idx++) {
		if (sta_ctx->conn_info.sta_id[idx] == HDD_WLAN_INVALID_STA_ID)
			continue;
		if (qdf_mem_cmp(&sta_ctx->conn_info.peer_macaddr[idx],
				addr, sizeof(struct qdf_mac_addr)))
			continue;
		return idx;
	}

	return INVALID_PEER_IDX;
}

/*
 * hdd_is_mcast_replay() - checks if pkt is multicast replay
 * @skb: packet skb
 *
 * Return: true if replayed multicast pkt, false otherwise
 */
static bool hdd_is_mcast_replay(struct sk_buff *skb)
{
	struct ethhdr *eth;

	eth = eth_hdr(skb);
	if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
		if (unlikely(ether_addr_equal(eth->h_source,
					      skb->dev->dev_addr)))
			return true;
	}
	return false;
}
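
/*
 * Illustrative sketch (not built): how an rx handler can drop a
 * looped-back multicast frame using hdd_is_mcast_replay(). The counter
 * mirrors the per-adapter stat incremented in hdd_rx_packet_cbk() below.
 */
#if 0
static bool example_drop_mcast_replay(struct hdd_adapter *adapter,
				      struct sk_buff *skb)
{
	if (hdd_is_mcast_replay(skb)) {
		/* our own multicast echoed back by the AP: drop it */
		qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
			       rx_usolict_arp_n_mcast_drp);
		qdf_nbuf_free(skb);
		return true;
	}

	return false;
}
#endif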

/**
 * hdd_is_arp_local() - check if an ARP request targets a local address
 * @skb: pointer to sk_buff
 *
 * Return: true if the ARP is local, false otherwise.
 */
static bool hdd_is_arp_local(struct sk_buff *skb)
{
	struct arphdr *arp;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	struct in_device *in_dev;
	unsigned char *arp_ptr;
	__be32 tip;

	arp = (struct arphdr *)skb->data;
	if (arp->ar_op == htons(ARPOP_REQUEST)) {
		in_dev = __in_dev_get_rtnl(skb->dev);
		if (in_dev) {
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next) {
				if (!strcmp(skb->dev->name, ifa->ifa_label))
					break;
			}
		}

		if (ifa && ifa->ifa_local) {
			/* skip sender HW address, sender IP (4 bytes) and
			 * target HW address to reach the target IP (tip)
			 */
			arp_ptr = (unsigned char *)(arp + 1);
			arp_ptr += (skb->dev->addr_len + 4 +
				    skb->dev->addr_len);
			memcpy(&tip, arp_ptr, 4);
			hdd_debug("ARP packet: local IP: %x dest IP: %x",
				  ifa->ifa_local, tip);
			if (ifa->ifa_local == tip)
				return true;
		}
	}

	return false;
}
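
/*
 * Illustrative sketch (not built): the ARP payload layout behind the
 * arp_ptr arithmetic above, for an Ethernet/IPv4 ARP (addr_len == 6).
 * Offsets are relative to the end of struct arphdr.
 */
#if 0
struct example_arp_eth_ipv4 {
	unsigned char ar_sha[ETH_ALEN];	/* sender hardware address */
	__be32 ar_sip;			/* sender IP address (the "+ 4") */
	unsigned char ar_tha[ETH_ALEN];	/* target hardware address */
	__be32 ar_tip;			/* target IP address == tip above */
} __packed;
#endif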

/**
 * hdd_is_rx_wake_lock_needed() - check if wake lock is needed
 * @skb: pointer to sk_buff
 *
 * RX wake lock is needed for:
 * 1) Unicast data packet OR
 * 2) Local ARP data packet
 *
 * Return: true if wake lock is needed or false otherwise.
 */
static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb)
{
	if ((skb->pkt_type != PACKET_BROADCAST &&
	     skb->pkt_type != PACKET_MULTICAST) || hdd_is_arp_local(skb))
		return true;

	return false;
}

#ifdef RECEIVE_OFFLOAD
/**
 * hdd_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO
 * @hdd_ctx: pointer to HDD context
 *
 * Return: None
 */
static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
{
	void *soc;

	soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!(cdp_cfg_get(soc, cfg_dp_lro_enable) ^
	    cdp_cfg_get(soc, cfg_dp_gro_enable))) {
		cdp_cfg_get(soc, cfg_dp_lro_enable) &&
			cdp_cfg_get(soc, cfg_dp_gro_enable) ?
		hdd_err("Can't enable both LRO and GRO, disabling Rx offload") :
		hdd_info("LRO and GRO both are disabled");
		hdd_ctx->ol_enable = 0;
	} else if (cdp_cfg_get(soc, cfg_dp_lro_enable)) {
		hdd_debug("Rx offload: LRO is enabled");
		hdd_ctx->ol_enable = CFG_LRO_ENABLED;
	} else {
		hdd_info("Rx offload: GRO is enabled");
		hdd_ctx->ol_enable = CFG_GRO_ENABLED;
	}
}
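
/*
 * Illustrative summary (not built): the XOR above selects an offload
 * mode only when exactly one of LRO/GRO is enabled in the DP config.
 *
 *   lro  gro  ->  ol_enable
 *    0    0      0 (both disabled)
 *    1    1      0 (conflict: offload disabled)
 *    1    0      CFG_LRO_ENABLED
 *    0    1      CFG_GRO_ENABLED
 */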

/**
 * hdd_gro_rx_bh_disable() - GRO RX/flush function
 * @adapter: pointer to adapter context
 * @napi_to_use: napi to be used to give packets to the stack, gro flush
 * @skb: pointer to sk_buff
 *
 * Function calls napi_gro_receive for the skb. If the skb indicates that a
 * flush needs to be done (set by the lower DP layer), the function also calls
 * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
 * the napi_gro_* calls.
 *
 * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
 *	   QDF error code.
 */
static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
					struct napi_struct *napi_to_use,
					struct sk_buff *skb)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	gro_result_t gro_res;
	bool flush_ind = QDF_NBUF_CB_RX_FLUSH_IND(skb);

	skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);

	local_bh_disable();
	gro_res = napi_gro_receive(napi_to_use, skb);
	if (flush_ind)
		napi_gro_flush(napi_to_use, false);
	local_bh_enable();

	if (gro_res == GRO_DROP)
		status = QDF_STATUS_E_GRO_DROP;

	if (flush_ind)
		adapter->hdd_stats.tx_rx_stats.rx_gro_flushes++;

	return status;
}
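
/*
 * Illustrative sketch (not built): why local_bh_disable() brackets the
 * napi_gro_* calls above. napi_gro_receive()/napi_gro_flush() expect
 * softirq (NAPI poll) context; when invoked from a driver RX thread
 * instead, softirqs must be disabled around them. Hypothetical helper
 * for a generic napi/skb pair:
 */
#if 0
static void example_gro_feed(struct napi_struct *napi, struct sk_buff *skb,
			     bool flush)
{
	local_bh_disable();		/* emulate softirq context */
	napi_gro_receive(napi, skb);	/* may consume or hold the skb */
	if (flush)
		napi_gro_flush(napi, false);	/* push held flows upward */
	local_bh_enable();
}
#endif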

/**
 * hdd_gro_rx_dp_thread() - Handle Rx processing via GRO for DP thread
 * @adapter: pointer to adapter context
 * @skb: pointer to sk_buff
 *
 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
 */
static
QDF_STATUS hdd_gro_rx_dp_thread(struct hdd_adapter *adapter,
				struct sk_buff *skb)
{
	struct napi_struct *napi_to_use = NULL;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct hdd_context *hdd_ctx = adapter->hdd_ctx;

	if (!adapter->hdd_ctx->enable_dp_rx_threads) {
		hdd_dp_err_rl("gro not supported without DP RX thread!");
		return status;
	}

	napi_to_use =
		dp_rx_get_napi_context(cds_get_context(QDF_MODULE_ID_SOC),
				       QDF_NBUF_CB_RX_CTX_ID(skb));

	if (!napi_to_use) {
		hdd_dp_err_rl("no napi to use for GRO!");
		return status;
	}

	if (qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_low_tput))
		return status;

	status = hdd_gro_rx_bh_disable(adapter, napi_to_use, skb);

	return status;
}

/**
 * hdd_gro_rx_legacy() - Handle Rx processing via GRO for Helium-based targets
 * @adapter: pointer to adapter context
 * @skb: pointer to sk_buff
 *
 * Supports GRO only in station mode.
 *
 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
 */
static
QDF_STATUS hdd_gro_rx_legacy(struct hdd_adapter *adapter, struct sk_buff *skb)
{
	struct qca_napi_info *qca_napii;
	struct qca_napi_data *napid;
	struct napi_struct *napi_to_use;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct hdd_context *hdd_ctx = adapter->hdd_ctx;

	/* Only enabling it for STA mode like LRO today */
	if (QDF_STA_MODE != adapter->device_mode)
		return QDF_STATUS_E_NOSUPPORT;

	if (qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_low_tput) ||
	    qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_concurrency))
		return QDF_STATUS_E_NOSUPPORT;

	napid = hdd_napi_get_all();
	if (unlikely(!napid))
		goto out;

	qca_napii = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(skb), napid);
	if (unlikely(!qca_napii))
		goto out;

	/*
	 * As we are breaking context in Rx-thread mode, there is an
	 * rx_thread NAPI corresponding to each hif_napi.
	 */
	if (adapter->hdd_ctx->enable_rxthread)
		napi_to_use = &qca_napii->rx_thread_napi;
	else
		napi_to_use = &qca_napii->napi;

	status = hdd_gro_rx_bh_disable(adapter, napi_to_use, skb);
out:

	return status;
}

/**
 * hdd_rxthread_napi_gro_flush() - GRO flush callback for NAPI+Rx_Thread Rx mode
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_rxthread_napi_gro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;

	local_bh_disable();
	/*
	 * As we are breaking context in Rx-thread mode, there is an
	 * rx_thread NAPI corresponding to each hif_napi.
	 */
	napi_gro_flush(&qca_napii->rx_thread_napi, false);
	local_bh_enable();
}

/**
 * hdd_hif_napi_gro_flush() - GRO flush callback for NAPI Rx mode
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_hif_napi_gro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;

	local_bh_disable();
	napi_gro_flush(&qca_napii->napi, false);
	local_bh_enable();
}

#ifdef FEATURE_LRO
/**
 * hdd_qdf_lro_flush() - LRO flush wrapper
 * @data: hif NAPI context
 *
 * Return: none
 */
static void hdd_qdf_lro_flush(void *data)
{
	struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
	qdf_lro_ctx_t qdf_lro_ctx = qca_napii->lro_ctx;

	qdf_lro_flush(qdf_lro_ctx);
}
#else
static void hdd_qdf_lro_flush(void *data)
{
}
#endif

/**
 * hdd_register_rx_ol_cb() - Register LRO/GRO rx processing callbacks
 * @hdd_ctx: pointer to hdd_ctx
 * @lithium_based_target: whether it is a Lithium-arch based target or not
 *
 * Return: none
 */
static void hdd_register_rx_ol_cb(struct hdd_context *hdd_ctx,
				  bool lithium_based_target)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!hdd_ctx) {
		hdd_err("HDD context is NULL");
		return;
	}

	hdd_ctx->en_tcp_delack_no_lro = 0;

	if (!hdd_is_lro_enabled(hdd_ctx)) {
		cdp_register_rx_offld_flush_cb(soc, hdd_qdf_lro_flush);
		hdd_ctx->receive_offload_cb = hdd_lro_rx;
		hdd_debug("LRO is enabled");
	} else if (hdd_ctx->ol_enable == CFG_GRO_ENABLED) {
		if (lithium_based_target) {
			/* no flush registration needed, it happens in DP thread */
			hdd_ctx->receive_offload_cb = hdd_gro_rx_dp_thread;
		} else {
			/* Helium-based targets */
			if (hdd_ctx->enable_rxthread)
				cdp_register_rx_offld_flush_cb(soc,
					hdd_rxthread_napi_gro_flush);
			else
				cdp_register_rx_offld_flush_cb(soc,
					hdd_hif_napi_gro_flush);
			hdd_ctx->receive_offload_cb = hdd_gro_rx_legacy;
		}
		hdd_debug("GRO is enabled");
	} else if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
		hdd_ctx->en_tcp_delack_no_lro = 1;
		hdd_debug("TCP Del ACK is enabled");
	}
}

/**
 * hdd_rx_ol_send_config() - Send RX offload configuration to FW
 * @hdd_ctx: pointer to hdd_ctx
 *
 * This function is used only for non-Lithium targets. Lithium-based targets
 * send the LRO config to FW during vdev attach, implemented in the common
 * DP layer.
 *
 * Return: 0 on success, non zero on failure
 */
static int hdd_rx_ol_send_config(struct hdd_context *hdd_ctx)
{
	struct cdp_lro_hash_config lro_config = {0};
	/*
	 * This will enable flow steering and Toeplitz hash,
	 * so enable it for LRO or GRO processing.
	 */
	if (cfg_get(hdd_ctx->psoc, CFG_DP_GRO) ||
	    cfg_get(hdd_ctx->psoc, CFG_DP_LRO)) {
		lro_config.lro_enable = 1;
		lro_config.tcp_flag = TCPHDR_ACK;
		lro_config.tcp_flag_mask = TCPHDR_FIN | TCPHDR_SYN |
					   TCPHDR_RST | TCPHDR_ACK |
					   TCPHDR_URG | TCPHDR_ECE |
					   TCPHDR_CWR;
	}

	get_random_bytes(lro_config.toeplitz_hash_ipv4,
			 (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
			  LRO_IPV4_SEED_ARR_SZ));

	get_random_bytes(lro_config.toeplitz_hash_ipv6,
			 (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
			  LRO_IPV6_SEED_ARR_SZ));

	if (wma_lro_init(&lro_config))
		return -EAGAIN;
	else
		hdd_dp_info("LRO Config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
			    lro_config.lro_enable, lro_config.tcp_flag,
			    lro_config.tcp_flag_mask);

	return 0;
}
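
/*
 * Illustrative sketch (not built): how the tcp_flag/tcp_flag_mask pair
 * above classifies LRO-eligible TCP segments. A segment qualifies when
 * its flags, restricted to the mask, equal tcp_flag (a pure ACK with no
 * FIN/SYN/RST/URG/ECE/CWR). This mirrors the usual flag/mask semantics;
 * the exact FW behaviour is an assumption here.
 */
#if 0
static bool example_lro_eligible(uint16_t tcp_flags)
{
	const uint16_t mask = TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST |
			      TCPHDR_ACK | TCPHDR_URG | TCPHDR_ECE |
			      TCPHDR_CWR;

	return (tcp_flags & mask) == TCPHDR_ACK;
}
#endif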

int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
{
	int ret = 0;
	bool lithium_based_target = false;

	if (hdd_ctx->target_type == TARGET_TYPE_QCA6290 ||
	    hdd_ctx->target_type == TARGET_TYPE_QCA6390)
		lithium_based_target = true;

	hdd_resolve_rx_ol_mode(hdd_ctx);
	hdd_register_rx_ol_cb(hdd_ctx, lithium_based_target);

	if (!lithium_based_target) {
		ret = hdd_rx_ol_send_config(hdd_ctx);
		if (ret) {
			hdd_ctx->ol_enable = 0;
			hdd_err("Failed to send LRO/GRO configuration! %d", ret);
			return ret;
		}
	}

	return 0;
}

void hdd_disable_rx_ol_in_concurrency(bool disable)
{
	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);

	if (!hdd_ctx) {
		hdd_err("hdd_ctx is NULL");
		return;
	}

	if (disable) {
		if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
			struct wlan_rx_tp_data rx_tp_data;

			hdd_info("Enable TCP delack as LRO disabled in concurrency");
			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
			rx_tp_data.level = GET_CUR_RX_LVL(hdd_ctx);
			wlan_hdd_update_tcp_rx_param(hdd_ctx, &rx_tp_data);
			hdd_ctx->en_tcp_delack_no_lro = 1;
		}
		qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_concurrency, 1);
	} else {
		if (HDD_MSM_CFG(hdd_ctx->config->enable_tcp_delack)) {
			hdd_info("Disable TCP delack as LRO is enabled");
			hdd_ctx->en_tcp_delack_no_lro = 0;
			hdd_reset_tcp_delack(hdd_ctx);
		}
		qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_concurrency, 0);
	}
}

void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
{
	if (disable)
		qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_low_tput, 1);
	else
		qdf_atomic_set(&hdd_ctx->disable_rx_ol_in_low_tput, 0);
}

#else /* RECEIVE_OFFLOAD */
int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
{
	hdd_err("Rx_OL, LRO/GRO not supported");
	return -EPERM;
}

void hdd_disable_rx_ol_in_concurrency(bool disable)
{
}

void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
{
}
#endif /* RECEIVE_OFFLOAD */

#ifdef WLAN_FEATURE_TSF_PLUS
static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
					qdf_nbuf_t netbuf,
					uint64_t target_time)
{
	if (!hdd_tsf_is_rx_set(hdd_ctx))
		return;

	hdd_rx_timestamp(netbuf, target_time);
}
#else
static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
					qdf_nbuf_t netbuf,
					uint64_t target_time)
{
}
#endif

QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter,
					 qdf_nbuf_t nbuf_list)
{
	if (unlikely((!adapter) || (!nbuf_list))) {
		hdd_err("Null params being passed");
		return QDF_STATUS_E_FAILURE;
	}
	return dp_rx_enqueue_pkt(cds_get_context(QDF_MODULE_ID_SOC), nbuf_list);
}

QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
				   struct sk_buff *skb)
{
	struct hdd_context *hdd_ctx = adapter->hdd_ctx;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	int netif_status;
	bool skb_receive_offload_ok = false;

	if (QDF_NBUF_CB_RX_TCP_PROTO(skb) &&
	    !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
		skb_receive_offload_ok = true;

	if (skb_receive_offload_ok && hdd_ctx->receive_offload_cb) {
		status = hdd_ctx->receive_offload_cb(adapter, skb);

		if (QDF_IS_STATUS_SUCCESS(status)) {
			adapter->hdd_stats.tx_rx_stats.rx_aggregated++;
			return status;
		}

		if (status == QDF_STATUS_E_GRO_DROP) {
			adapter->hdd_stats.tx_rx_stats.rx_gro_dropped++;
			return status;
		}
	}

	adapter->hdd_stats.tx_rx_stats.rx_non_aggregated++;

	/* Account for GRO/LRO ineligible packets, mostly UDP */
	hdd_ctx->no_rx_offload_pkt_cnt++;

	if (qdf_likely(hdd_ctx->enable_dp_rx_threads ||
		       hdd_ctx->enable_rxthread)) {
		local_bh_disable();
		netif_status = netif_receive_skb(skb);
		local_bh_enable();
	} else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))) {
		/*
		 * Use netif_rx_ni() for frames received before the peer
		 * is registered, to avoid contention with the NAPI softirq.
		 * Refer fix:
		 * qcacld-3.0: Do netif_rx_ni() for frames received before
		 * peer assoc
		 */
		netif_status = netif_rx_ni(skb);
	} else { /* NAPI Context */
		netif_status = netif_receive_skb(skb);
	}

	if (netif_status == NET_RX_SUCCESS)
		status = QDF_STATUS_SUCCESS;

	return status;
}
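
/*
 * Illustrative summary (not built): stack-delivery paths chosen by
 * hdd_rx_deliver_to_stack() above.
 *
 *   TCP + offload cb registered -> receive_offload_cb (GRO/LRO first)
 *   DP RX threads / RX thread   -> netif_receive_skb() with BH disabled
 *   peer-cached frame           -> netif_rx_ni() (process-context safe)
 *   otherwise (NAPI poll)       -> netif_receive_skb()
 */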

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
static bool hdd_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
{
	return false;
}
#else
static bool hdd_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
{
	return cfg80211_is_gratuitous_arp_unsolicited_na(skb);
}
#endif

QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
			     qdf_nbuf_t rxBuf)
{
	struct hdd_adapter *adapter = NULL;
	struct hdd_context *hdd_ctx = NULL;
	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
	struct sk_buff *skb = NULL;
	struct sk_buff *next = NULL;
	struct hdd_station_ctx *sta_ctx = NULL;
	unsigned int cpu_index;
	struct qdf_mac_addr *mac_addr, *dest_mac_addr;
	bool wake_lock = false;
	uint8_t pkt_type = 0;
	bool track_arp = false;
	struct wlan_objmgr_vdev *vdev;

	/* Sanity check on inputs */
	if (unlikely((!adapter_context) || (!rxBuf))) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)adapter_context;
	if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Magic cookie(%x) for adapter sanity verification is invalid",
			  adapter->magic);
		return QDF_STATUS_E_FAILURE;
	}

	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	if (unlikely(!hdd_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: HDD context is Null", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = wlan_hdd_get_cpu();

	next = (struct sk_buff *)rxBuf;

	while (next) {
		skb = next;
		next = skb->next;
		skb->next = NULL;

		if (QDF_NBUF_CB_PACKET_TYPE_ARP ==
		    QDF_NBUF_CB_GET_PACKET_TYPE(skb)) {
			if (qdf_nbuf_data_is_arp_rsp(skb) &&
			    (adapter->track_arp_ip ==
			     qdf_nbuf_get_arp_src_ip(skb))) {
				++adapter->hdd_stats.hdd_arp_stats.
						rx_arp_rsp_count;
				QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
					  QDF_TRACE_LEVEL_INFO,
					  "%s: ARP packet received",
					  __func__);
				track_arp = true;
			}
		}
		/* track connectivity stats */
		if (adapter->pkt_type_bitmap)
			hdd_tx_rx_collect_connectivity_stats_info(skb, adapter,
						PKT_TYPE_RSP, &pkt_type);

		sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
		if ((sta_ctx->conn_info.proxy_arp_service) &&
		    hdd_is_gratuitous_arp_unsolicited_na(skb)) {
			qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
						rx_usolict_arp_n_mcast_drp);
			/* Remove SKB from internal tracking table before
			 * submitting it to stack.
			 */
			qdf_nbuf_free(skb);
			continue;
		}

		hdd_event_eapol_log(skb, QDF_RX);
		qdf_dp_trace_log_pkt(adapter->vdev_id, skb, QDF_RX,
				     QDF_TRACE_DEFAULT_PDEV_ID);

		DPTRACE(qdf_dp_trace(skb,
				     QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(skb),
				     sizeof(qdf_nbuf_data(skb)), QDF_RX));

		DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
					      QDF_DP_TRACE_RX_PACKET_RECORD,
					      0, QDF_RX));

		dest_mac_addr = (struct qdf_mac_addr *)(skb->data);
		mac_addr = (struct qdf_mac_addr *)(skb->data+QDF_MAC_ADDR_SIZE);

		if (!hdd_is_current_high_throughput(hdd_ctx)) {
			vdev = hdd_objmgr_get_vdev(adapter);
			if (vdev) {
				ucfg_tdls_update_rx_pkt_cnt(vdev, mac_addr,
							    dest_mac_addr);
				hdd_objmgr_put_vdev(vdev);
			}
		}

		skb->dev = adapter->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
		++adapter->stats.rx_packets;
		adapter->stats.rx_bytes += skb->len;

		/* Incr GW Rx count for NUD tracking based on GW mac addr */
		hdd_nud_incr_gw_rx_pkt_cnt(adapter, mac_addr);

		/* Check & drop replayed mcast packets (for IPV6) */
		if (hdd_ctx->config->multicast_replay_filter &&
		    hdd_is_mcast_replay(skb)) {
			qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
						rx_usolict_arp_n_mcast_drp);
			qdf_nbuf_free(skb);
			continue;
		}

		/* hold configurable wakelock for unicast traffic */
		if (!hdd_is_current_high_throughput(hdd_ctx) &&
		    hdd_ctx->config->rx_wakelock_timeout &&
		    sta_ctx->conn_info.is_authenticated)
			wake_lock = hdd_is_rx_wake_lock_needed(skb);

		if (wake_lock) {
			cds_host_diag_log_work(&hdd_ctx->rx_wake_lock,
					hdd_ctx->config->rx_wakelock_timeout,
					WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
			qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock,
						      hdd_ctx->config->
							rx_wakelock_timeout);
		}

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(skb);

		hdd_tsf_timestamp_rx(hdd_ctx, skb, ktime_to_us(skb->tstamp));

		qdf_status = hdd_rx_deliver_to_stack(adapter, skb);

		if (QDF_IS_STATUS_SUCCESS(qdf_status)) {
			++adapter->hdd_stats.tx_rx_stats.
						rx_delivered[cpu_index];
			if (track_arp)
				++adapter->hdd_stats.hdd_arp_stats.
							rx_delivered;
			/* track connectivity stats */
			if (adapter->pkt_type_bitmap)
				hdd_tx_rx_collect_connectivity_stats_info(
					skb, adapter,
					PKT_TYPE_RX_DELIVERED, &pkt_type);
		} else {
			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
			if (track_arp)
				++adapter->hdd_stats.hdd_arp_stats.rx_refused;

			/* track connectivity stats */
			if (adapter->pkt_type_bitmap)
				hdd_tx_rx_collect_connectivity_stats_info(
					skb, adapter,
					PKT_TYPE_RX_REFUSED, &pkt_type);
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * hdd_reason_type_to_string() - return string conversion of reason type
 * @reason: reason type
 *
 * This utility function helps log string conversion of reason type.
 *
 * Return: string conversion of reason type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_reason_type_to_string(enum netif_reason_type reason)
{
	switch (reason) {
	CASE_RETURN_STRING(WLAN_CONTROL_PATH);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL);
	CASE_RETURN_STRING(WLAN_FW_PAUSE);
	CASE_RETURN_STRING(WLAN_TX_ABORT);
	CASE_RETURN_STRING(WLAN_VDEV_STOP);
	CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
	CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL_PRIORITY);
	default:
		return "Invalid";
	}
}

/**
 * hdd_action_type_to_string() - return string conversion of action type
 * @action: action type
 *
 * This utility function helps log string conversion of action_type.
 *
 * Return: string conversion of action type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_action_type_to_string(enum netif_action_type action)
{

	switch (action) {
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_VO_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_VO_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_VI_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_VI_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_BE_BK_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_WAKE_NON_PRIORITY_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_NON_PRIORITY_QUEUE);
	default:
		return "Invalid";
	}
}

/**
 * wlan_hdd_update_queue_oper_stats() - update queue operation statistics
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * Return: none
 */
static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	switch (action) {
	case WLAN_STOP_ALL_NETIF_QUEUE:
	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
	case WLAN_NETIF_BE_BK_QUEUE_OFF:
	case WLAN_NETIF_VI_QUEUE_OFF:
	case WLAN_NETIF_VO_QUEUE_OFF:
	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
	case WLAN_STOP_NON_PRIORITY_QUEUE:
		adapter->queue_oper_stats[reason].pause_count++;
		break;
	case WLAN_START_ALL_NETIF_QUEUE:
	case WLAN_WAKE_ALL_NETIF_QUEUE:
	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
	case WLAN_NETIF_VI_QUEUE_ON:
	case WLAN_NETIF_VO_QUEUE_ON:
	case WLAN_NETIF_PRIORITY_QUEUE_ON:
	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		adapter->queue_oper_stats[reason].unpause_count++;
		break;
	default:
		break;
	}
}

/**
 * hdd_netdev_queue_is_locked() - check if a netdev TX queue is locked
 * @txq: net device tx queue
 *
 * For an SMP system, always return false, since we can safely rely on
 * __netif_tx_trylock().
 *
 * Return: true locked; false not locked
 */
#ifdef QCA_CONFIG_SMP
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	return false;
}
#else
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	return txq->xmit_lock_owner != -1;
}
#endif
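
/*
 * Illustrative sketch (not built): the safe-update pattern built from
 * hdd_netdev_queue_is_locked() and __netif_tx_trylock(), as used by
 * wlan_hdd_update_txq_timestamp() below. On UP kernels the owner check
 * avoids spinlock recursion when called from dev_queue_xmit() context.
 */
#if 0
static void example_touch_txq(struct netdev_queue *txq)
{
	if (hdd_netdev_queue_is_locked(txq))
		return;		/* UP: this CPU already holds the lock */

	if (__netif_tx_trylock(txq)) {
		txq_trans_update(txq);	/* refresh trans_start timestamp */
		__netif_tx_unlock(txq);
	}
}
#endif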

/**
 * wlan_hdd_update_txq_timestamp() - update txq timestamp
 * @dev: net device
 *
 * Return: none
 */
static void wlan_hdd_update_txq_timestamp(struct net_device *dev)
{
	struct netdev_queue *txq;
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		txq = netdev_get_tx_queue(dev, i);

		/*
		 * On a UP system, the kernel will trigger a watchdog bite
		 * if spinlock recursion is detected. Unfortunately
		 * recursion is possible when this is called in
		 * dev_queue_xmit() context, where the stack grabs the lock
		 * before calling the driver's ndo_start_xmit callback.
		 */
		if (!hdd_netdev_queue_is_locked(txq)) {
			if (__netif_tx_trylock(txq)) {
				txq_trans_update(txq);
				__netif_tx_unlock(txq);
			}
		}
	}
}

/**
 * wlan_hdd_update_unpause_time() - update unpause time
 * @adapter: adapter handle
 *
 * Return: none
 */
static void wlan_hdd_update_unpause_time(struct hdd_adapter *adapter)
{
	qdf_time_t curr_time = qdf_system_ticks();

	adapter->total_unpause_time += curr_time - adapter->last_time;
	adapter->last_time = curr_time;
}

/**
 * wlan_hdd_update_pause_time() - update pause time
 * @adapter: adapter handle
 * @temp_map: pause map of the queues before the current unpause
 *
 * Return: none
 */
static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
				       uint32_t temp_map)
{
	qdf_time_t curr_time = qdf_system_ticks();
	uint8_t i;
	qdf_time_t pause_time;

	pause_time = curr_time - adapter->last_time;
	adapter->total_pause_time += pause_time;
	adapter->last_time = curr_time;

	for (i = 0; i < WLAN_REASON_TYPE_MAX; i++) {
		if (temp_map & (1 << i)) {
			adapter->queue_oper_stats[i].total_pause_time +=
								pause_time;
			break;
		}
	}

}
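
/*
 * Illustrative note (not built): the attribution loop above credits the
 * whole pause interval to the lowest-numbered reason bit set in
 * temp_map, because of the break after the first match. If two reasons
 * were active concurrently, only the lower-numbered one accumulates
 * total_pause_time.
 */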

uint32_t
wlan_hdd_dump_queue_history_state(struct hdd_netif_queue_history *queue_history,
				  char *buf, uint32_t size)
{
	unsigned int i;
	unsigned int index = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		index += qdf_scnprintf(buf + index,
				       size - index,
				       "%u:0x%lx ",
				       i, queue_history->tx_q_state[i]);
	}

	return index;
}
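
/*
 * Illustrative sketch (not built): consuming the dump helper above. The
 * buffer size is an arbitrary example value; qdf_scnprintf() guarantees
 * the accumulated writes never overrun the buffer.
 */
#if 0
static void example_log_queue_state(struct hdd_netif_queue_history *q_hist)
{
	char buf[128];
	uint32_t len;

	len = wlan_hdd_dump_queue_history_state(q_hist, buf, sizeof(buf));
	hdd_debug("txq states (%u bytes): %s", len, buf);
}
#endif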

/**
 * wlan_hdd_update_queue_history_state() - Save a copy of dev TX queues state
 * @dev: net device
 * @q_hist: adapter queue history entry to fill
 *
 * Save netdev TX queues state into adapter queue history.
 *
 * Return: None
 */
static void
wlan_hdd_update_queue_history_state(struct net_device *dev,
				    struct hdd_netif_queue_history *q_hist)
{
	unsigned int i = 0;
	uint32_t num_tx_queues = 0;
	struct netdev_queue *txq = NULL;

	num_tx_queues = qdf_min(dev->num_tx_queues, (uint32_t)NUM_TX_QUEUES);

	for (i = 0; i < num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		q_hist->tx_q_state[i] = txq->state;
	}
}

/**
 * wlan_hdd_stop_non_priority_queue() - stop non-priority queues
 * @adapter: adapter handle
 *
 * Return: None
 */
static inline void wlan_hdd_stop_non_priority_queue(struct hdd_adapter *adapter)
{
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
}

/**
 * wlan_hdd_wake_non_priority_queue() - wake non-priority queues
 * @adapter: adapter handle
 *
 * Return: None
 */
static inline void wlan_hdd_wake_non_priority_queue(struct hdd_adapter *adapter)
{
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BE);
	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BK);
}

/**
 * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * This is a single function used for netif_queue related actions such
 * as start/stop of network queues and the on/off carrier option.
 *
 * Return: None
 */
void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	uint32_t temp_map;
	uint8_t index;
	struct hdd_netif_queue_history *txq_hist_ptr;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) ||
	    (!adapter->dev)) {
		hdd_err("adapter is invalid");
		return;
	}

	switch (action) {

	case WLAN_NETIF_CARRIER_ON:
		netif_carrier_on(adapter->dev);
		break;

	case WLAN_NETIF_CARRIER_OFF:
		netif_carrier_off(adapter->dev);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			wlan_hdd_stop_non_priority_queue(adapter);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_BE_BK_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VI_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VI_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VO_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VO_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_wake_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			wlan_hdd_wake_non_priority_queue(adapter);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		netif_carrier_off(adapter->dev);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_carrier_on(adapter->dev);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_ACTION_TYPE_NONE:
		break;

	default:
		hdd_err("unsupported action %d", action);
	}

	spin_lock_bh(&adapter->pause_map_lock);
	if (adapter->pause_map & (1 << WLAN_PEER_UNAUTHORISED))
		wlan_hdd_process_peer_unauthorised_pause(adapter);

	index = adapter->history_index++;
	if (adapter->history_index == WLAN_HDD_MAX_HISTORY_ENTRY)
		adapter->history_index = 0;
	spin_unlock_bh(&adapter->pause_map_lock);

	wlan_hdd_update_queue_oper_stats(adapter, action, reason);
2624
Harprit Chhabada1125e0c2019-01-09 17:12:34 -08002625 adapter->queue_oper_history[index].time = qdf_system_ticks();
2626 adapter->queue_oper_history[index].netif_action = action;
2627 adapter->queue_oper_history[index].netif_reason = reason;
2628 adapter->queue_oper_history[index].pause_map = adapter->pause_map;
Mohit Khannaf7e7b342019-04-08 11:54:21 -07002629
2630 txq_hist_ptr = &adapter->queue_oper_history[index];
2631
2632 wlan_hdd_update_queue_history_state(adapter->dev, txq_hist_ptr);
2633}
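
/*
 * Illustrative note (not part of the driver): pause_map keeps one bit per
 * netif_reason_type, so independent pause reasons compose and the queues
 * only restart once every reason has been cleared. A minimal sketch of a
 * hypothetical sequence:
 *
 *	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
 *				     WLAN_DATA_FLOW_CONTROL);
 *	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
 *				     WLAN_FW_PAUSE);
 *	(pause_map now has two bits set; queues stay stopped)
 *	wlan_hdd_netif_queue_control(adapter, WLAN_START_ALL_NETIF_QUEUE,
 *				     WLAN_DATA_FLOW_CONTROL);
 *	(pause_map still non-zero because of WLAN_FW_PAUSE, so no wake yet)
 *	wlan_hdd_netif_queue_control(adapter, WLAN_START_ALL_NETIF_QUEUE,
 *				     WLAN_FW_PAUSE);
 *	(pause_map now zero: netif_tx_start_all_queues() finally runs)
 */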

void hdd_print_netdev_txq_status(struct net_device *dev)
{
	unsigned int i;

	if (!dev)
		return;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		hdd_debug("netdev tx queue[%u] state:0x%lx",
			  i, txq->state);
	}
}
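
/*
 * Illustrative note (not part of the driver): txq->state is the kernel's
 * queue-state bitmask from include/linux/netdevice.h. Assuming the usual
 * bit layout there, a printed value decodes roughly as:
 *
 *	0x0 - queue running
 *	0x1 - QUEUE_STATE_DRV_XOFF   (driver stopped the queue)
 *	0x2 - QUEUE_STATE_STACK_XOFF (stack stopped the queue)
 *	0x4 - QUEUE_STATE_FROZEN     (queue frozen while xmit lock is held)
 *
 * so "state:0x1" after a flow-control event points at a driver-side stop.
 */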

#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * hdd_set_mon_rx_cb() - Set Monitor mode Rx callback
 * @dev: Pointer to net_device structure
 *
 * Return: 0 for success; non-zero for failure
 */
int hdd_set_mon_rx_cb(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	int ret;
	QDF_STATUS qdf_status;
	struct ol_txrx_desc_type sta_desc = {0};
	struct ol_txrx_ops txrx_ops;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
	txrx_ops.rx.rx = hdd_mon_rx_packet_cbk;
	hdd_monitor_set_rx_monitor_cb(&txrx_ops, hdd_rx_monitor_callback);
	cdp_vdev_register(soc,
			  (struct cdp_vdev *)cdp_get_mon_vdev_from_pdev(soc,
			  (struct cdp_pdev *)pdev),
			  adapter, (struct cdp_ctrl_objmgr_vdev *)adapter->vdev,
			  &txrx_ops);
	/* the peer is created in wma_vdev_attach() -> wma_create_peer() */
	qdf_status = cdp_peer_register(soc,
				       (struct cdp_pdev *)pdev, &sta_desc);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		hdd_err("cdp_peer_register() failed. Status= %d [0x%08X]",
			qdf_status, qdf_status);
		goto exit;
	}

exit:
	ret = qdf_status_to_os_return(qdf_status);
	return ret;
}
#endif
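
/*
 * Illustrative usage (not part of the driver): monitor-mode bring-up is
 * expected to install the Rx callback once the monitor vdev is up, e.g.
 * from a hypothetical enable path:
 *
 *	ret = hdd_set_mon_rx_cb(adapter->dev);
 *	if (ret)
 *		hdd_err("failed to set monitor rx cb: %d", ret);
 */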

/**
 * hdd_send_rps_ind() - send rps indication to daemon
 * @adapter: adapter context
 *
 * If the RPS feature is enabled via INI, send an RPS enable indication to
 * the daemon. The indication carries the interface name so the daemon can
 * find the matching sysfs node. This should be sent for every available
 * interface.
 *
 * Return: none
 */
void hdd_send_rps_ind(struct hdd_adapter *adapter)
{
	int i;
	uint8_t cpu_map_list_len = 0;
	struct hdd_context *hdd_ctxt = NULL;
	struct wlan_rps_data rps_data;
	struct cds_config_info *cds_cfg;

	cds_cfg = cds_get_ini_config();

	if (!adapter) {
		hdd_err("adapter is NULL");
		return;
	}

	if (!cds_cfg) {
		hdd_err("cds_cfg is NULL");
		return;
	}

	hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
	rps_data.num_queues = NUM_TX_QUEUES;

	hdd_info("cpu_map_list '%s'", hdd_ctxt->config->cpu_map_list);

	/* in case no cpu map list is provided, simply return */
	if (!strlen(hdd_ctxt->config->cpu_map_list)) {
		hdd_err("no cpu map list found");
		goto err;
	}

	if (QDF_STATUS_SUCCESS !=
	    hdd_hex_string_to_u16_array(hdd_ctxt->config->cpu_map_list,
					rps_data.cpu_map_list,
					&cpu_map_list_len,
					WLAN_SVC_IFACE_NUM_QUEUES)) {
		hdd_err("invalid cpu map list");
		goto err;
	}

	/* use the smaller of the parsed map length and the queue count */
	rps_data.num_queues =
		(cpu_map_list_len < rps_data.num_queues) ?
		cpu_map_list_len : rps_data.num_queues;

	for (i = 0; i < rps_data.num_queues; i++) {
		hdd_info("cpu_map_list[%d] = 0x%x",
			 i, rps_data.cpu_map_list[i]);
	}

	strlcpy(rps_data.ifname, adapter->dev->name,
		sizeof(rps_data.ifname));
	wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
				    WLAN_SVC_RPS_ENABLE_IND,
				    &rps_data, sizeof(rps_data));

	cds_cfg->rps_enabled = true;

	return;

err:
	hdd_err("Wrong RPS configuration. enabling rx_thread");
	cds_cfg->rps_enabled = false;
}
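
/*
 * Illustrative note (not part of the driver): cpu_map_list is a hex string
 * INI value holding one 16-bit CPU mask per RPS-capable queue. Assuming a
 * hypothetical configuration of
 *
 *	rpsRxQueueCpuMapList=a b
 *
 * queue 0 would get mask 0xa (CPUs 1 and 3) and queue 1 mask 0xb (CPUs 0,
 * 1 and 3); the daemon is then expected to write these masks to
 * /sys/class/net/<ifname>/queues/rx-<n>/rps_cpus for the interface named
 * in rps_data.ifname.
 */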

/**
 * hdd_send_rps_disable_ind() - send rps disable indication to daemon
 * @adapter: adapter context
 *
 * Return: none
 */
void hdd_send_rps_disable_ind(struct hdd_adapter *adapter)
{
	struct hdd_context *hdd_ctxt = NULL;
	struct wlan_rps_data rps_data;
	struct cds_config_info *cds_cfg;

	cds_cfg = cds_get_ini_config();

	if (!adapter) {
		hdd_err("adapter is NULL");
		return;
	}

	if (!cds_cfg) {
		hdd_err("cds_cfg is NULL");
		return;
	}

	hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
	rps_data.num_queues = NUM_TX_QUEUES;

	hdd_info("Set cpu_map_list 0");

	/* disable is conveyed by re-sending the enable indication with an
	 * all-zero cpu map
	 */
	qdf_mem_zero(&rps_data.cpu_map_list, sizeof(rps_data.cpu_map_list));

	strlcpy(rps_data.ifname, adapter->dev->name, sizeof(rps_data.ifname));
	wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
				    WLAN_SVC_RPS_ENABLE_IND,
				    &rps_data, sizeof(rps_data));

	cds_cfg->rps_enabled = false;
}

void hdd_tx_queue_cb(hdd_handle_t hdd_handle, uint32_t vdev_id,
		     enum netif_action_type action,
		     enum netif_reason_type reason)
{
	struct hdd_context *hdd_ctx = hdd_handle_to_context(hdd_handle);
	struct hdd_adapter *adapter;

	/*
	 * Validating the context is deliberately skipped here. Even if a
	 * driver unload/SSR is in progress in another context, a firmware
	 * event such as a STA kick-out should still disable the Tx queue
	 * to stop the influx of traffic.
	 */
	if (!hdd_ctx) {
		hdd_err("Invalid context passed");
		return;
	}

	adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
	if (!adapter) {
		hdd_err("vdev_id %d does not exist with host", vdev_id);
		return;
	}
	hdd_debug("Tx Queue action %d on vdev %d", action, vdev_id);

	wlan_hdd_netif_queue_control(adapter, action, reason);
}

#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
/**
 * hdd_reset_tcp_delack() - Reset tcp delack value to default
 * @hdd_ctx: Handle to hdd context
 *
 * Function used to reset the TCP delayed-ack indication to its default
 * (low throughput) level
 *
 * Return: None
 */
void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx)
{
	enum wlan_tp_level next_level = WLAN_SVC_TP_LOW;
	struct wlan_rx_tp_data rx_tp_data = {0};

	rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;
	rx_tp_data.level = next_level;
	hdd_ctx->rx_high_ind_cnt = 0;
	wlan_hdd_update_tcp_rx_param(hdd_ctx, &rx_tp_data);
}

/**
 * hdd_is_current_high_throughput() - Check if vote level is high
 * @hdd_ctx: Handle to hdd context
 *
 * Function used to check if the current bus bandwidth vote level is high
 *
 * Return: True if vote level is high
 */
bool hdd_is_current_high_throughput(struct hdd_context *hdd_ctx)
{
	return hdd_ctx->cur_vote_level >= PLD_BUS_WIDTH_HIGH;
}
#endif
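
/*
 * Illustrative note (not part of the driver): the check above assumes
 * pld_bus_width_type orders vote levels monotonically, e.g.
 *
 *	PLD_BUS_WIDTH_NONE < PLD_BUS_WIDTH_LOW < PLD_BUS_WIDTH_MEDIUM <
 *	PLD_BUS_WIDTH_HIGH (with any higher levels above HIGH)
 *
 * so ">= PLD_BUS_WIDTH_HIGH" covers HIGH and anything above it.
 */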

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * hdd_ini_tx_flow_control() - Initialize INI parameters related to
 *	tx flow control
 * @config: pointer to hdd config
 * @psoc: pointer to psoc obj
 *
 * Return: none
 */
static void hdd_ini_tx_flow_control(struct hdd_config *config,
				    struct wlan_objmgr_psoc *psoc)
{
	config->tx_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_LWM);
	config->tx_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_HWM_OFFSET);
	config->tx_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH);
	config->tx_lbw_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_LWM);
	config->tx_lbw_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET);
	config->tx_lbw_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH);
	config->tx_hbw_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_LWM);
	config->tx_hbw_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET);
	config->tx_hbw_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH);
}
#else
static void hdd_ini_tx_flow_control(struct hdd_config *config,
				    struct wlan_objmgr_psoc *psoc)
{
}
#endif
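
/*
 * Illustrative note (not part of the driver): with legacy tx flow control
 * the watermarks bound the tx descriptor pool. Assuming, say, a low
 * watermark of 300 and a high watermark offset of 94, the driver would
 * pause the OS netif queues when the free descriptor count drops to ~300
 * and resume them once it refills past 300 + 94; the lbw/hbw variants
 * apply the same scheme to the low- and high-bandwidth co-existence cases.
 */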

#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
/**
 * hdd_ini_bus_bandwidth() - Initialize INI parameters related to
 *	bus bandwidth
 * @config: pointer to hdd config
 * @psoc: pointer to psoc obj
 *
 * Return: none
 */
static void hdd_ini_bus_bandwidth(struct hdd_config *config,
				  struct wlan_objmgr_psoc *psoc)
{
	config->bus_bw_high_threshold =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD);
	config->bus_bw_medium_threshold =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_MEDIUM_THRESHOLD);
	config->bus_bw_low_threshold =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_LOW_THRESHOLD);
	config->bus_bw_compute_interval =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL);
}

/**
 * hdd_ini_tcp_settings() - Initialize INI parameters related to
 *	tcp settings
 * @config: pointer to hdd config
 * @psoc: pointer to psoc obj
 *
 * Return: none
 */
static void hdd_ini_tcp_settings(struct hdd_config *config,
				 struct wlan_objmgr_psoc *psoc)
{
	config->enable_tcp_limit_output =
		cfg_get(psoc, CFG_DP_ENABLE_TCP_LIMIT_OUTPUT);
	config->enable_tcp_adv_win_scale =
		cfg_get(psoc, CFG_DP_ENABLE_TCP_ADV_WIN_SCALE);
	config->enable_tcp_delack =
		cfg_get(psoc, CFG_DP_ENABLE_TCP_DELACK);
	config->tcp_delack_thres_high =
		cfg_get(psoc, CFG_DP_TCP_DELACK_THRESHOLD_HIGH);
	config->tcp_delack_thres_low =
		cfg_get(psoc, CFG_DP_TCP_DELACK_THRESHOLD_LOW);
	config->tcp_delack_timer_count =
		cfg_get(psoc, CFG_DP_TCP_DELACK_TIMER_COUNT);
	config->tcp_tx_high_tput_thres =
		cfg_get(psoc, CFG_DP_TCP_TX_HIGH_TPUT_THRESHOLD);
	config->enable_tcp_param_update =
		cfg_get(psoc, CFG_DP_ENABLE_TCP_PARAM_UPDATE);
}
#else
static void hdd_ini_bus_bandwidth(struct hdd_config *config,
				  struct wlan_objmgr_psoc *psoc)
{
}

static void hdd_ini_tcp_settings(struct hdd_config *config,
				 struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_FEATURE_DP_BUS_BANDWIDTH */

/**
 * hdd_set_rx_mode_value() - set rx_mode values
 * @hdd_ctx: hdd context
 *
 * Return: none
 */
static void hdd_set_rx_mode_value(struct hdd_context *hdd_ctx)
{
	uint32_t rx_mode = hdd_ctx->config->rx_mode;
	enum QDF_GLOBAL_MODE con_mode;

	con_mode = hdd_get_conparam();

	/* RPS has higher priority than dynamic RPS when both bits are set */
	if (rx_mode & CFG_ENABLE_RPS && rx_mode & CFG_ENABLE_DYNAMIC_RPS)
		rx_mode &= ~CFG_ENABLE_DYNAMIC_RPS;

	if (rx_mode & CFG_ENABLE_RX_THREAD && rx_mode & CFG_ENABLE_RPS) {
		hdd_warn("rx_mode wrong configuration. Make it default");
		rx_mode = CFG_RX_MODE_DEFAULT;
	}

	if (rx_mode & CFG_ENABLE_RX_THREAD) {
		hdd_ctx->enable_rxthread = true;
	} else if (rx_mode & CFG_ENABLE_DP_RX_THREADS) {
		/* DP rx threads are not used in monitor mode */
		if (con_mode == QDF_GLOBAL_MONITOR_MODE)
			hdd_ctx->enable_dp_rx_threads = false;
		else
			hdd_ctx->enable_dp_rx_threads = true;
	}

	if (rx_mode & CFG_ENABLE_RPS)
		hdd_ctx->rps = true;

	if (rx_mode & CFG_ENABLE_NAPI)
		hdd_ctx->napi_enable = true;

	if (rx_mode & CFG_ENABLE_DYNAMIC_RPS)
		hdd_ctx->dynamic_rps = true;

	hdd_debug("rx_mode:%u dp_rx_threads:%u rx_thread:%u napi:%u rps:%u dynamic rps %u",
		  rx_mode, hdd_ctx->enable_dp_rx_threads,
		  hdd_ctx->enable_rxthread, hdd_ctx->napi_enable,
		  hdd_ctx->rps, hdd_ctx->dynamic_rps);
}
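
/*
 * Illustrative note (not part of the driver): rx_mode is a bitmap, and the
 * precedence rules above resolve conflicting bits. Assuming a hypothetical
 * INI value with both RPS bits set plus NAPI:
 *
 *	rx_mode = CFG_ENABLE_RPS | CFG_ENABLE_DYNAMIC_RPS | CFG_ENABLE_NAPI;
 *
 * the dynamic-RPS bit is cleared first (static RPS wins), leaving
 * hdd_ctx->rps = true, hdd_ctx->napi_enable = true and
 * hdd_ctx->dynamic_rps = false.
 */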

#ifdef CONFIG_DP_TRACE
static void
hdd_dp_dp_trace_cfg_update(struct hdd_config *config,
			   struct wlan_objmgr_psoc *psoc)
{
	qdf_size_t array_out_size;

	config->enable_dp_trace = cfg_get(psoc, CFG_DP_ENABLE_DP_TRACE);
	qdf_uint8_array_parse(cfg_get(psoc, CFG_DP_DP_TRACE_CONFIG),
			      config->dp_trace_config,
			      sizeof(config->dp_trace_config), &array_out_size);
}
#else
static void
hdd_dp_dp_trace_cfg_update(struct hdd_config *config,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif

#ifdef WLAN_NUD_TRACKING
static void
hdd_dp_nud_tracking_cfg_update(struct hdd_config *config,
			       struct wlan_objmgr_psoc *psoc)
{
	config->enable_nud_tracking = cfg_get(psoc, CFG_DP_ENABLE_NUD_TRACKING);
}
#else
static void
hdd_dp_nud_tracking_cfg_update(struct hdd_config *config,
			       struct wlan_objmgr_psoc *psoc)
{
}
#endif

void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
		       struct hdd_context *hdd_ctx)
{
	struct hdd_config *config;
	qdf_size_t array_out_size;

	config = hdd_ctx->config;
	hdd_ini_tx_flow_control(config, psoc);
	hdd_ini_bus_bandwidth(config, psoc);
	hdd_ini_tcp_settings(config, psoc);
	config->napi_cpu_affinity_mask =
		cfg_get(psoc, CFG_DP_NAPI_CE_CPU_MASK);
	config->rx_thread_affinity_mask =
		cfg_get(psoc, CFG_DP_RX_THREAD_CPU_MASK);
	qdf_uint8_array_parse(cfg_get(psoc, CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST),
			      config->cpu_map_list,
			      sizeof(config->cpu_map_list), &array_out_size);
	config->tx_orphan_enable = cfg_get(psoc, CFG_DP_TX_ORPHAN_ENABLE);
	config->rx_mode = cfg_get(psoc, CFG_DP_RX_MODE);
	hdd_set_rx_mode_value(hdd_ctx);
	config->multicast_replay_filter =
		cfg_get(psoc, CFG_DP_FILTER_MULTICAST_REPLAY);
	config->rx_wakelock_timeout =
		cfg_get(psoc, CFG_DP_RX_WAKELOCK_TIMEOUT);
	config->num_dp_rx_threads = cfg_get(psoc, CFG_DP_NUM_DP_RX_THREADS);
	config->cfg_wmi_credit_cnt = cfg_get(psoc, CFG_DP_HTC_WMI_CREDIT_CNT);
	hdd_dp_dp_trace_cfg_update(config, psoc);
	hdd_dp_nud_tracking_cfg_update(config, psoc);
}
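
/*
 * Illustrative sketch (not part of the driver): a new DP INI would follow
 * the same pattern, assuming a hypothetical CFG_DP_FOO item declared via
 * the cfg framework:
 *
 *	config->foo = cfg_get(psoc, CFG_DP_FOO);
 *
 * cfg_get() returns the INI-overridden or default value registered for the
 * item, which keeps hdd_dp_cfg_update() a flat list of such assignments.
 */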