blob: c687717682890ea550ae7419f861a26c91d8ba76 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Varun Reddy Yeturu076eaa82018-01-16 12:16:14 -08002 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wlan_hdd_tx_rx.c
30 *
31 * Linux HDD Tx/RX APIs
32 */
33
Jeff Johnsona0399642016-12-05 12:39:59 -080034/* denote that this file does not allow legacy hddLog */
35#define HDD_DISALLOW_LEGACY_HDDLOG 1
36
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080037#include <wlan_hdd_tx_rx.h>
38#include <wlan_hdd_softap_tx_rx.h>
39#include <wlan_hdd_napi.h>
40#include <linux/netdevice.h>
41#include <linux/skbuff.h>
42#include <linux/etherdevice.h>
Ravi Joshibb8d4512016-08-22 10:14:52 -070043#include <linux/if_ether.h>
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +053044#include <linux/inetdevice.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080045#include <cds_sched.h>
Manjunathappa Prakash779e4862016-09-12 17:00:11 -070046#include <cds_utils.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080047
48#include <wlan_hdd_p2p.h>
49#include <linux/wireless.h>
50#include <net/cfg80211.h>
51#include <net/ieee80211_radiotap.h>
52#include "sap_api.h"
53#include "wlan_hdd_wmm.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080054#include "wlan_hdd_tdls.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080055#include <wlan_hdd_ipa.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080056#include "wlan_hdd_ocb.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080057#include "wlan_hdd_lro.h"
Leo Changfdb45c32016-10-28 11:09:23 -070058#include <cdp_txrx_cmn.h>
59#include <cdp_txrx_peer_ops.h>
60#include <cdp_txrx_flow_ctrl_v2.h>
Deepak Dhamdhere5872c8c2016-06-02 15:51:47 -070061#include "wlan_hdd_nan_datapath.h"
Ravi Joshib89e7f72016-09-07 13:43:15 -070062#include "pld_common.h"
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +053063#include <cdp_txrx_misc.h>
Ravi Joshi106ffe02017-01-18 18:09:05 -080064#include "wlan_hdd_rx_monitor.h"
Zhu Jianmin04392c42017-05-12 16:34:53 +080065#include "wlan_hdd_power.h"
Yu Wangceb357b2017-06-01 12:04:18 +080066#include <wlan_hdd_tsf.h>
Ravi Joshi106ffe02017-01-18 18:09:05 -080067
Kabilan Kannan75bd4b32017-12-20 11:49:35 -080068/*
69 * Count to ratelimit the HDD logs during TX failures
70 */
71#define HDD_TX_BLOCKED_RATE 256
72
Poddar, Siddarth4acb30a2016-09-22 20:07:53 +053073#ifdef QCA_LL_TX_FLOW_CONTROL_V2
74/*
75 * Mapping Linux AC interpretation to SME AC.
76 * Host has 5 tx queues, 4 flow-controlled queues for regular traffic and
 77 * one non-flow-controlled queue for high priority control traffic (EAPOL, DHCP).
78 * The fifth queue is mapped to AC_VO to allow for proper prioritization.
79 */
80const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
81 SME_AC_VO,
82 SME_AC_VI,
83 SME_AC_BE,
84 SME_AC_BK,
85 SME_AC_VO,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080086};
87
Poddar, Siddarth4acb30a2016-09-22 20:07:53 +053088#else
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080089const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
90 SME_AC_VO,
91 SME_AC_VI,
92 SME_AC_BE,
93 SME_AC_BK,
94};
95
Poddar, Siddarth4acb30a2016-09-22 20:07:53 +053096#endif
97
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080098#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
99/**
100 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
101 * @adapter_context: pointer to vdev adapter
102 *
103 * If Blocked OS Q is not resumed during timeout period, to prevent
104 * permanent stall, resume OS Q forcefully.
105 *
106 * Return: None
107 */
108void hdd_tx_resume_timer_expired_handler(void *adapter_context)
109{
Jeff Johnson80486862017-10-02 13:21:29 -0700110 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800111
Jeff Johnson80486862017-10-02 13:21:29 -0700112 if (!adapter) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800113 /* INVALID ARG */
114 return;
115 }
116
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700117 hdd_debug("Enabling queues");
Jeff Johnson80486862017-10-02 13:21:29 -0700118 wlan_hdd_netif_queue_control(adapter, WLAN_WAKE_ALL_NETIF_QUEUE,
Jeff Johnsona0399642016-12-05 12:39:59 -0800119 WLAN_CONTROL_PATH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800120}
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530121#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
122
123/**
124 * hdd_tx_resume_false() - Resume OS TX Q false leads to queue disabling
Jeff Johnson80486862017-10-02 13:21:29 -0700125 * @adapter: pointer to hdd adapter
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530126 * @tx_resume: TX Q resume trigger
127 *
128 *
129 * Return: None
130 */
131static void
Jeff Johnson80486862017-10-02 13:21:29 -0700132hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530133{
134 if (true == tx_resume)
135 return;
136
137 /* Pause TX */
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700138 hdd_debug("Disabling queues");
Jeff Johnson80486862017-10-02 13:21:29 -0700139 wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530140 WLAN_DATA_FLOW_CONTROL);
141
142 if (QDF_TIMER_STATE_STOPPED ==
Jeff Johnson80486862017-10-02 13:21:29 -0700143 qdf_mc_timer_get_current_state(&adapter->
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530144 tx_flow_control_timer)) {
145 QDF_STATUS status;
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700146
Jeff Johnson80486862017-10-02 13:21:29 -0700147 status = qdf_mc_timer_start(&adapter->tx_flow_control_timer,
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530148 WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);
149
150 if (!QDF_IS_STATUS_SUCCESS(status))
151 hdd_err("Failed to start tx_flow_control_timer");
152 else
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700153 adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530154 }
155
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700156 adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
157 adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530158}
159#else
160
161static inline void
Jeff Johnson80486862017-10-02 13:21:29 -0700162hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530163{
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530164}
165#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800166
Jeff Johnson80486862017-10-02 13:21:29 -0700167static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
gbianec670c592016-11-24 11:21:30 +0800168 struct sk_buff *skb)
169{
Jeff Johnson80486862017-10-02 13:21:29 -0700170 struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
tfyubdf453e2017-09-27 13:34:30 +0800171 int need_orphan = 0;
172
Jeff Johnson80486862017-10-02 13:21:29 -0700173 if (adapter->tx_flow_low_watermark > 0) {
tfyubdf453e2017-09-27 13:34:30 +0800174#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
175 /*
176 * The TCP TX throttling logic is changed a little after
177 * 3.19-rc1 kernel, the TCP sending limit will be smaller,
178 * which will throttle the TCP packets to the host driver.
179 * The TCP UP LINK throughput will drop heavily. In order to
180 * fix this issue, need to orphan the socket buffer asap, which
181 * will call skb's destructor to notify the TCP stack that the
182 * SKB buffer is unowned. And then the TCP stack will pump more
183 * packets to host driver.
184 *
185 * The TX packets might be dropped for UDP case in the iperf
186 * testing. So need to be protected by follow control.
187 */
188 need_orphan = 1;
189#else
190 if (hdd_ctx->config->tx_orphan_enable)
191 need_orphan = 1;
192#endif
tfyu5f01db22017-10-11 13:51:04 +0800193 } else if (hdd_ctx->config->tx_orphan_enable) {
194 if (qdf_nbuf_is_ipv4_tcp_pkt(skb) ||
Tiger Yu438c6482017-10-13 11:07:00 +0800195 qdf_nbuf_is_ipv6_tcp_pkt(skb))
tfyu5f01db22017-10-11 13:51:04 +0800196 need_orphan = 1;
tfyubdf453e2017-09-27 13:34:30 +0800197 }
198
tfyu5f01db22017-10-11 13:51:04 +0800199 if (need_orphan) {
gbianec670c592016-11-24 11:21:30 +0800200 skb_orphan(skb);
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700201 ++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
Tiger Yu438c6482017-10-13 11:07:00 +0800202 } else
gbianec670c592016-11-24 11:21:30 +0800203 skb = skb_unshare(skb, GFP_ATOMIC);
gbianec670c592016-11-24 11:21:30 +0800204
205 return skb;
206}
207
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800208/**
209 * hdd_tx_resume_cb() - Resume OS TX Q.
210 * @adapter_context: pointer to vdev apdapter
211 * @tx_resume: TX Q resume trigger
212 *
213 * Q was stopped due to WLAN TX path low resource condition
214 *
215 * Return: None
216 */
217void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
218{
Jeff Johnson80486862017-10-02 13:21:29 -0700219 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
Jeff Johnson40dae4e2017-08-29 14:00:25 -0700220 struct hdd_station_ctx *hdd_sta_ctx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800221
Jeff Johnson80486862017-10-02 13:21:29 -0700222 if (!adapter) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800223 /* INVALID ARG */
224 return;
225 }
226
Jeff Johnson80486862017-10-02 13:21:29 -0700227 hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800228
229 /* Resume TX */
230 if (true == tx_resume) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530231 if (QDF_TIMER_STATE_STOPPED !=
Jeff Johnson80486862017-10-02 13:21:29 -0700232 qdf_mc_timer_get_current_state(&adapter->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800233 tx_flow_control_timer)) {
Jeff Johnson80486862017-10-02 13:21:29 -0700234 qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800235 }
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700236 hdd_debug("Enabling queues");
Jeff Johnson80486862017-10-02 13:21:29 -0700237 wlan_hdd_netif_queue_control(adapter,
Jeff Johnsona0399642016-12-05 12:39:59 -0800238 WLAN_WAKE_ALL_NETIF_QUEUE,
239 WLAN_DATA_FLOW_CONTROL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800240 }
Jeff Johnson80486862017-10-02 13:21:29 -0700241 hdd_tx_resume_false(adapter, tx_resume);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800242}
243
bings284f8be2017-08-11 10:41:30 +0800244bool hdd_tx_flow_control_is_pause(void *adapter_context)
245{
Jeff Johnson80486862017-10-02 13:21:29 -0700246 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
bings284f8be2017-08-11 10:41:30 +0800247
Jeff Johnson80486862017-10-02 13:21:29 -0700248 if ((NULL == adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
bings284f8be2017-08-11 10:41:30 +0800249 /* INVALID ARG */
Jeff Johnson80486862017-10-02 13:21:29 -0700250 hdd_err("invalid adapter %pK", adapter);
bings284f8be2017-08-11 10:41:30 +0800251 return false;
252 }
253
Jeff Johnson80486862017-10-02 13:21:29 -0700254 return adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL);
bings284f8be2017-08-11 10:41:30 +0800255}
256
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700257void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
Anurag Chouhan210db072016-02-22 18:42:15 +0530258 qdf_mc_timer_callback_t timer_callback,
bings284f8be2017-08-11 10:41:30 +0800259 ol_txrx_tx_flow_control_fp flow_control_fp,
260 ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause_fp)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800261{
262 if (adapter->tx_flow_timer_initialized == false) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530263 qdf_mc_timer_init(&adapter->tx_flow_control_timer,
Anurag Chouhan6d760662016-02-20 16:05:43 +0530264 QDF_TIMER_TYPE_SW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800265 timer_callback,
266 adapter);
267 adapter->tx_flow_timer_initialized = true;
268 }
Leo Changfdb45c32016-10-28 11:09:23 -0700269 cdp_fc_register(cds_get_context(QDF_MODULE_ID_SOC),
Jeff Johnson1b780e42017-10-31 14:11:45 -0700270 adapter->session_id, flow_control_fp, adapter,
bings284f8be2017-08-11 10:41:30 +0800271 flow_control_is_pause_fp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800272}
273
274/**
275 * hdd_deregister_tx_flow_control() - Deregister TX Flow control
276 * @adapter: adapter handle
277 *
278 * Return: none
279 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700280void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800281{
Leo Changfdb45c32016-10-28 11:09:23 -0700282 cdp_fc_deregister(cds_get_context(QDF_MODULE_ID_SOC),
Jeff Johnson1b780e42017-10-31 14:11:45 -0700283 adapter->session_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800284 if (adapter->tx_flow_timer_initialized == true) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530285 qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
286 qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800287 adapter->tx_flow_timer_initialized = false;
288 }
289}
290
291/**
292 * hdd_get_tx_resource() - check tx resources and take action
293 * @adapter: adapter handle
294 * @STAId: station id
295 * @timer_value: timer value
296 *
297 * Return: none
298 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700299void hdd_get_tx_resource(struct hdd_adapter *adapter,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800300 uint8_t STAId, uint16_t timer_value)
301{
302 if (false ==
Leo Changfdb45c32016-10-28 11:09:23 -0700303 cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC), STAId,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800304 adapter->tx_flow_low_watermark,
305 adapter->tx_flow_high_watermark_offset)) {
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700306 hdd_debug("Disabling queues lwm %d hwm offset %d",
Jeff Johnsona0399642016-12-05 12:39:59 -0800307 adapter->tx_flow_low_watermark,
308 adapter->tx_flow_high_watermark_offset);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800309 wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
310 WLAN_DATA_FLOW_CONTROL);
311 if ((adapter->tx_flow_timer_initialized == true) &&
Anurag Chouhan210db072016-02-22 18:42:15 +0530312 (QDF_TIMER_STATE_STOPPED ==
313 qdf_mc_timer_get_current_state(&adapter->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800314 tx_flow_control_timer))) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530315 qdf_mc_timer_start(&adapter->tx_flow_control_timer,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800316 timer_value);
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700317 adapter->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
318 adapter->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
319 adapter->hdd_stats.tx_rx_stats.is_txflow_paused = true;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800320 }
321 }
322}
323
gbianec670c592016-11-24 11:21:30 +0800324#else
Mohit Khannad0b63f52017-02-18 18:05:52 -0800325/**
326 * hdd_skb_orphan() - skb_unshare a cloned packed else skb_orphan
Jeff Johnson80486862017-10-02 13:21:29 -0700327 * @adapter: pointer to HDD adapter
Mohit Khannad0b63f52017-02-18 18:05:52 -0800328 * @skb: pointer to skb data packet
329 *
330 * Return: pointer to skb structure
331 */
Jeff Johnson80486862017-10-02 13:21:29 -0700332static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
Mohit Khannad0b63f52017-02-18 18:05:52 -0800333 struct sk_buff *skb) {
gbianec670c592016-11-24 11:21:30 +0800334
Mohit Khannad0b63f52017-02-18 18:05:52 -0800335 struct sk_buff *nskb;
tfyubdf453e2017-09-27 13:34:30 +0800336#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
Jeff Johnson80486862017-10-02 13:21:29 -0700337 struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
tfyubdf453e2017-09-27 13:34:30 +0800338#endif
Mohit Khannad0b63f52017-02-18 18:05:52 -0800339
Mohit Khanna87493732017-08-27 23:26:44 -0700340 hdd_skb_fill_gso_size(adapter->dev, skb);
341
Manjunathappa Prakashdab74fa2017-06-19 12:11:03 -0700342 nskb = skb_unshare(skb, GFP_ATOMIC);
tfyubdf453e2017-09-27 13:34:30 +0800343#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
Manjunathappa Prakashdab74fa2017-06-19 12:11:03 -0700344 if (unlikely(hdd_ctx->config->tx_orphan_enable) && (nskb == skb)) {
Mohit Khannad0b63f52017-02-18 18:05:52 -0800345 /*
346 * For UDP packets we want to orphan the packet to allow the app
347 * to send more packets. The flow would ultimately be controlled
348 * by the limited number of tx descriptors for the vdev.
349 */
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700350 ++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
Mohit Khannad0b63f52017-02-18 18:05:52 -0800351 skb_orphan(skb);
352 }
tfyubdf453e2017-09-27 13:34:30 +0800353#endif
Mohit Khannad0b63f52017-02-18 18:05:52 -0800354 return nskb;
gbianec670c592016-11-24 11:21:30 +0800355}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800356#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
357
Nirav Shah5e74bb82016-07-20 16:01:27 +0530358/**
359 * qdf_event_eapol_log() - send event to wlan diag
360 * @skb: skb ptr
361 * @dir: direction
362 * @eapol_key_info: eapol key info
363 *
364 * Return: None
365 */
366void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
367{
368 int16_t eapol_key_info;
369
370 WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);
371
372 if ((dir == QDF_TX &&
373 (QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
374 QDF_NBUF_CB_GET_PACKET_TYPE(skb))))
375 return;
376 else if (!qdf_nbuf_is_ipv4_eapol_pkt(skb))
377 return;
378
379 eapol_key_info = (uint16_t)(*(uint16_t *)
380 (skb->data + EAPOL_KEY_INFO_OFFSET));
381
382 wlan_diag_event.event_sub_type =
383 (dir == QDF_TX ?
384 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
385 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
386 wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
387 (skb->data + EAPOL_PACKET_TYPE_OFFSET));
388 wlan_diag_event.eapol_key_info = eapol_key_info;
389 wlan_diag_event.eapol_rate = 0;
390 qdf_mem_copy(wlan_diag_event.dest_addr,
391 (skb->data + QDF_NBUF_DEST_MAC_OFFSET),
392 sizeof(wlan_diag_event.dest_addr));
393 qdf_mem_copy(wlan_diag_event.src_addr,
394 (skb->data + QDF_NBUF_SRC_MAC_OFFSET),
395 sizeof(wlan_diag_event.src_addr));
396
397 WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
398}
399
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800400
401/**
Nirav Shah5e74bb82016-07-20 16:01:27 +0530402 * wlan_hdd_classify_pkt() - classify packet
403 * @skb - sk buff
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800404 *
Nirav Shah5e74bb82016-07-20 16:01:27 +0530405 * Return: none
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800406 */
Nirav Shah5e74bb82016-07-20 16:01:27 +0530407void wlan_hdd_classify_pkt(struct sk_buff *skb)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800408{
Nirav Shah5e74bb82016-07-20 16:01:27 +0530409 struct ethhdr *eh = (struct ethhdr *)skb->data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800410
Nirav Shah5e74bb82016-07-20 16:01:27 +0530411 qdf_mem_set(skb->cb, sizeof(skb->cb), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800412
Nirav Shah5e74bb82016-07-20 16:01:27 +0530413 /* check destination mac address is broadcast/multicast */
414 if (is_broadcast_ether_addr((uint8_t *)eh))
415 QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
416 else if (is_multicast_ether_addr((uint8_t *)eh))
417 QDF_NBUF_CB_GET_IS_MCAST(skb) = true;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800418
Nirav Shah5e74bb82016-07-20 16:01:27 +0530419 if (qdf_nbuf_is_ipv4_arp_pkt(skb))
420 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
421 QDF_NBUF_CB_PACKET_TYPE_ARP;
422 else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
423 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
424 QDF_NBUF_CB_PACKET_TYPE_DHCP;
425 else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
426 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
427 QDF_NBUF_CB_PACKET_TYPE_EAPOL;
428 else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
429 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
430 QDF_NBUF_CB_PACKET_TYPE_WAPI;
Zhu Jianmin04392c42017-05-12 16:34:53 +0800431 else if (qdf_nbuf_is_icmp_pkt(skb))
432 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
433 QDF_NBUF_CB_PACKET_TYPE_ICMP;
Poddar, Siddarth44aa5aa2017-07-10 17:30:22 +0530434 else if (qdf_nbuf_is_icmpv6_pkt(skb))
435 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
436 QDF_NBUF_CB_PACKET_TYPE_ICMPv6;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800437}
438
439/**
Zhu Jianmin04392c42017-05-12 16:34:53 +0800440 * wlan_hdd_latency_opt()- latency option
441 * @adapter: pointer to the adapter structure
442 * @skb: pointer to sk buff
443 *
444 * Function to disable power save for icmp packets.
445 *
446 * Return: None
447 */
448#ifdef WLAN_ICMP_DISABLE_PS
449static inline void
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700450wlan_hdd_latency_opt(struct hdd_adapter *adapter, struct sk_buff *skb)
Zhu Jianmin04392c42017-05-12 16:34:53 +0800451{
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -0700452 struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
Zhu Jianmin04392c42017-05-12 16:34:53 +0800453
454 if (hdd_ctx->config->icmp_disable_ps_val <= 0)
455 return;
456
457 if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
458 QDF_NBUF_CB_PACKET_TYPE_ICMP) {
459 wlan_hdd_set_powersave(adapter, false,
460 hdd_ctx->config->icmp_disable_ps_val);
Yeshwanth Sriram Guntukaae03c432017-11-12 13:31:02 +0530461 sme_ps_enable_auto_ps_timer(WLAN_HDD_GET_HAL_CTX(adapter),
462 adapter->session_id,
463 hdd_ctx->config->icmp_disable_ps_val);
Zhu Jianmin04392c42017-05-12 16:34:53 +0800464 }
465}
466#else
static inline void
wlan_hdd_latency_opt(struct hdd_adapter *adapter, struct sk_buff *skb)
{
	/* no-op: WLAN_ICMP_DISABLE_PS not enabled */
}
471#endif
472
473/**
Ravi Joshi24477b72016-07-19 15:45:09 -0700474 * hdd_get_transmit_sta_id() - function to retrieve station id to be used for
475 * sending traffic towards a particular destination address. The destination
476 * address can be unicast, multicast or broadcast
477 *
478 * @adapter: Handle to adapter context
479 * @dst_addr: Destination address
480 * @station_id: station id
481 *
482 * Returns: None
483 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700484static void hdd_get_transmit_sta_id(struct hdd_adapter *adapter,
Nirav Shah5e74bb82016-07-20 16:01:27 +0530485 struct sk_buff *skb, uint8_t *station_id)
Ravi Joshi24477b72016-07-19 15:45:09 -0700486{
487 bool mcbc_addr = false;
Naveen Rawatac027cb2017-04-27 15:02:42 -0700488 QDF_STATUS status;
Jeff Johnson40dae4e2017-08-29 14:00:25 -0700489 struct hdd_station_ctx *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
Nirav Shah5e74bb82016-07-20 16:01:27 +0530490 struct qdf_mac_addr *dst_addr = NULL;
Ravi Joshi24477b72016-07-19 15:45:09 -0700491
Nirav Shah5e74bb82016-07-20 16:01:27 +0530492 dst_addr = (struct qdf_mac_addr *)skb->data;
Naveen Rawatac027cb2017-04-27 15:02:42 -0700493 status = hdd_get_peer_sta_id(sta_ctx, dst_addr, station_id);
494 if (QDF_IS_STATUS_ERROR(status)) {
Nirav Shah5e74bb82016-07-20 16:01:27 +0530495 if (QDF_NBUF_CB_GET_IS_BCAST(skb) ||
496 QDF_NBUF_CB_GET_IS_MCAST(skb)) {
Varun Reddy Yeturudd51e8d2017-05-14 14:51:13 -0700497 hdd_debug("Received MC/BC packet for transmission");
Ravi Joshi24477b72016-07-19 15:45:09 -0700498 mcbc_addr = true;
Ravi Joshi24477b72016-07-19 15:45:09 -0700499 }
500 }
501
Rakesh Sunkicf1c9ab2016-08-25 14:11:25 -0700502 if (adapter->device_mode == QDF_IBSS_MODE ||
503 adapter->device_mode == QDF_NDI_MODE) {
Ravi Joshi24477b72016-07-19 15:45:09 -0700504 /*
505 * This check is necessary to make sure station id is not
Rakesh Sunkicf1c9ab2016-08-25 14:11:25 -0700506 * overwritten for UC traffic in IBSS or NDI mode
Ravi Joshi24477b72016-07-19 15:45:09 -0700507 */
508 if (mcbc_addr)
Rakesh Sunkicf1c9ab2016-08-25 14:11:25 -0700509 *station_id = sta_ctx->broadcast_staid;
Ravi Joshi24477b72016-07-19 15:45:09 -0700510 } else {
511 /* For the rest, traffic is directed to AP/P2P GO */
512 if (eConnectionState_Associated == sta_ctx->conn_info.connState)
513 *station_id = sta_ctx->conn_info.staId[0];
514 }
515}
516
517/**
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800518 * hdd_is_tx_allowed() - check if Tx is allowed based on current peer state
519 * @skb: pointer to OS packet (sk_buff)
520 * @peer_id: Peer STA ID in peer table
521 *
522 * This function gets the peer state from DP and check if it is either
523 * in OL_TXRX_PEER_STATE_CONN or OL_TXRX_PEER_STATE_AUTH. Only EAP packets
524 * are allowed when peer_state is OL_TXRX_PEER_STATE_CONN. All packets
525 * allowed when peer_state is OL_TXRX_PEER_STATE_AUTH.
526 *
527 * Return: true if Tx is allowed and false otherwise.
528 */
529static inline bool hdd_is_tx_allowed(struct sk_buff *skb, uint8_t peer_id)
530{
531 enum ol_txrx_peer_state peer_state;
532 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
533 void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
534 void *peer;
535
536 QDF_BUG(soc);
537 QDF_BUG(pdev);
538
539 peer = cdp_peer_find_by_local_id(soc, pdev, peer_id);
540
541 if (peer == NULL) {
Kabilan Kannan75bd4b32017-12-20 11:49:35 -0800542 hdd_err_ratelimited(HDD_TX_BLOCKED_RATE,
543 "Unable to find peer entry for staid: %d",
544 peer_id);
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800545 return false;
546 }
547
548 peer_state = cdp_peer_state_get(soc, peer);
Jeff Johnson68755312017-02-10 11:46:55 -0800549 if (likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800550 return true;
Jeff Johnson68755312017-02-10 11:46:55 -0800551 if (OL_TXRX_PEER_STATE_CONN == peer_state &&
552 ntohs(skb->protocol) == HDD_ETHERTYPE_802_1_X)
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800553 return true;
hqu8925c8f2017-12-11 19:29:01 +0800554 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson68755312017-02-10 11:46:55 -0800555 FL("Invalid peer state for Tx: %d"), peer_state);
556 return false;
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800557}
558
559/**
Mukul Sharmac4de4ef2016-09-12 15:39:00 +0530560 * __hdd_hard_start_xmit() - Transmit a frame
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800561 * @skb: pointer to OS packet (sk_buff)
562 * @dev: pointer to network device
563 *
564 * Function registered with the Linux OS for transmitting
565 * packets. This version of the function directly passes
566 * the packet to Transport Layer.
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530567 * In case of any packet drop or error, log the error with
568 * INFO HIGH/LOW/MEDIUM to avoid excessive logging in kmsg.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800569 *
570 * Return: Always returns NETDEV_TX_OK
571 */
Jeff Johnson3ae708d2016-10-05 15:45:00 -0700572static int __hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800573{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530574 QDF_STATUS status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800575 sme_ac_enum_type ac;
Abhishek Singh12be60f2017-08-11 13:52:42 +0530576 enum sme_qos_wmmuptype up;
Jeff Johnson80486862017-10-02 13:21:29 -0700577 struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800578 bool granted;
Nirav Shah5e74bb82016-07-20 16:01:27 +0530579 uint8_t STAId;
Jeff Johnsonb9424862017-10-30 08:49:35 -0700580 struct hdd_station_ctx *sta_ctx = &adapter->session.station;
Kabilan Kannan1c1c4022017-04-06 22:49:26 -0700581 struct qdf_mac_addr *mac_addr;
Mohit Khannaf8f96822017-05-17 17:11:59 -0700582 bool pkt_proto_logged = false;
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700583#ifdef QCA_PKT_PROTO_TRACE
584 uint8_t proto_type = 0;
Jeff Johnson80486862017-10-02 13:21:29 -0700585 struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
Kabilan Kannan32eb5022016-10-04 12:24:50 -0700586#endif /* QCA_PKT_PROTO_TRACE */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800587
588#ifdef QCA_WIFI_FTM
Anurag Chouhan6d760662016-02-20 16:05:43 +0530589 if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800590 kfree_skb(skb);
591 return NETDEV_TX_OK;
592 }
593#endif
594
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700595 ++adapter->hdd_stats.tx_rx_stats.tx_called;
596 adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
Sravan Kumar Kairam3a698312017-10-16 14:16:16 +0530597
Will Huang20de9432018-02-06 17:01:03 +0800598 if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
599 cds_is_load_or_unload_in_progress()) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530600 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Will Huang20de9432018-02-06 17:01:03 +0800601 "Recovery/(Un)load in progress, dropping the packet");
Nirav Shahdf3659e2016-06-27 12:26:28 +0530602 goto drop_pkt;
Govind Singhede435f2015-12-01 16:16:36 +0530603 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800604
Nirav Shah5e74bb82016-07-20 16:01:27 +0530605 wlan_hdd_classify_pkt(skb);
Jeff Johnson80486862017-10-02 13:21:29 -0700606 wlan_hdd_latency_opt(adapter, skb);
Nirav Shah5e74bb82016-07-20 16:01:27 +0530607
Ravi Joshi24477b72016-07-19 15:45:09 -0700608 STAId = HDD_WLAN_INVALID_STA_ID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800609
Jeff Johnson80486862017-10-02 13:21:29 -0700610 hdd_get_transmit_sta_id(adapter, skb, &STAId);
Naveen Rawat209d0932016-08-03 15:07:23 -0700611 if (STAId >= WLAN_MAX_STA_COUNT) {
hqu5e6b9862017-12-21 18:48:46 +0800612 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsona0399642016-12-05 12:39:59 -0800613 "Invalid station id, transmit operation suspended");
Ravi Joshi24477b72016-07-19 15:45:09 -0700614 goto drop_pkt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800615 }
616
Jeff Johnson80486862017-10-02 13:21:29 -0700617 hdd_get_tx_resource(adapter, STAId,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800618 WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);
619
620 /* Get TL AC corresponding to Qdisc queue index/AC. */
621 ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];
622
Nirav Shahcbc6d722016-03-01 16:24:53 +0530623 if (!qdf_nbuf_ipa_owned_get(skb)) {
Jeff Johnson80486862017-10-02 13:21:29 -0700624 skb = hdd_skb_orphan(adapter, skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800625 if (!skb)
Nirav Shahdf3659e2016-06-27 12:26:28 +0530626 goto drop_pkt_accounting;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800627 }
628
Ravi Joshi24477b72016-07-19 15:45:09 -0700629 /*
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530630 * Add SKB to internal tracking table before further processing
631 * in WLAN driver.
632 */
633 qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__);
634
635 /*
Ravi Joshi24477b72016-07-19 15:45:09 -0700636 * user priority from IP header, which is already extracted and set from
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800637 * select_queue call back function
638 */
639 up = skb->priority;
640
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700641 ++adapter->hdd_stats.tx_rx_stats.tx_classified_ac[ac];
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800642#ifdef HDD_WMM_DEBUG
Srinivas Girigowda028c4482017-03-09 18:52:02 -0800643 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800644 "%s: Classified as ac %d up %d", __func__, ac, up);
645#endif /* HDD_WMM_DEBUG */
646
Jeff Johnson137c8ee2017-10-28 13:06:48 -0700647 if (HDD_PSB_CHANGED == adapter->psb_changed) {
Ravi Joshi24477b72016-07-19 15:45:09 -0700648 /*
649 * Function which will determine acquire admittance for a
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800650 * WMM AC is required or not based on psb configuration done
651 * in the framework
652 */
Jeff Johnson80486862017-10-02 13:21:29 -0700653 hdd_wmm_acquire_access_required(adapter, ac);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800654 }
655 /*
656 * Make sure we already have access to this access category
657 * or it is EAPOL or WAPI frame during initial authentication which
658 * can have artifically boosted higher qos priority.
659 */
660
Jeff Johnson137c8ee2017-10-28 13:06:48 -0700661 if (((adapter->psb_changed & (1 << ac)) &&
Jeff Johnson02d14ce2017-10-31 09:08:30 -0700662 likely(adapter->hdd_wmm_status.wmmAcStatus[ac].
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800663 wmmAcAccessAllowed)) ||
Jeff Johnsond377dce2017-10-04 10:32:42 -0700664 ((sta_ctx->conn_info.uIsAuthenticated == false) &&
Nirav Shah5e74bb82016-07-20 16:01:27 +0530665 (QDF_NBUF_CB_PACKET_TYPE_EAPOL ==
666 QDF_NBUF_CB_GET_PACKET_TYPE(skb) ||
667 QDF_NBUF_CB_PACKET_TYPE_WAPI ==
668 QDF_NBUF_CB_GET_PACKET_TYPE(skb)))) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800669 granted = true;
670 } else {
Jeff Johnson80486862017-10-02 13:21:29 -0700671 status = hdd_wmm_acquire_access(adapter, ac, &granted);
Jeff Johnson137c8ee2017-10-28 13:06:48 -0700672 adapter->psb_changed |= (1 << ac);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800673 }
674
675 if (!granted) {
676 bool isDefaultAc = false;
Ravi Joshi24477b72016-07-19 15:45:09 -0700677 /*
678 * ADDTS request for this AC is sent, for now
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800679	 * send this packet through next available lower
680 * Access category until ADDTS negotiation completes.
681 */
682 while (!likely
Jeff Johnson02d14ce2017-10-31 09:08:30 -0700683 (adapter->hdd_wmm_status.wmmAcStatus[ac].
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800684 wmmAcAccessAllowed)) {
685 switch (ac) {
686 case SME_AC_VO:
687 ac = SME_AC_VI;
688 up = SME_QOS_WMM_UP_VI;
689 break;
690 case SME_AC_VI:
691 ac = SME_AC_BE;
692 up = SME_QOS_WMM_UP_BE;
693 break;
694 case SME_AC_BE:
695 ac = SME_AC_BK;
696 up = SME_QOS_WMM_UP_BK;
697 break;
698 default:
699 ac = SME_AC_BK;
700 up = SME_QOS_WMM_UP_BK;
701 isDefaultAc = true;
702 break;
703 }
704 if (isDefaultAc)
705 break;
706 }
707 skb->priority = up;
708 skb->queue_mapping = hdd_linux_up_to_ac_map[up];
709 }
710
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700711#ifdef QCA_PKT_PROTO_TRACE
712 if ((hdd_ctx->config->gEnableDebugLog & CDS_PKT_TRAC_TYPE_EAPOL) ||
713 (hdd_ctx->config->gEnableDebugLog & CDS_PKT_TRAC_TYPE_DHCP)) {
714 proto_type = cds_pkt_get_proto_type(skb,
715 hdd_ctx->config->gEnableDebugLog,
716 0);
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700717 if (CDS_PKT_TRAC_TYPE_EAPOL & proto_type)
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700718 cds_pkt_trace_buf_update("ST:T:EPL");
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700719 else if (CDS_PKT_TRAC_TYPE_DHCP & proto_type)
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700720 cds_pkt_trace_buf_update("ST:T:DHC");
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700721 }
722#endif /* QCA_PKT_PROTO_TRACE */
723
Jeff Johnson80486862017-10-02 13:21:29 -0700724 adapter->stats.tx_bytes += skb->len;
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700725
Kabilan Kannan1c1c4022017-04-06 22:49:26 -0700726 mac_addr = (struct qdf_mac_addr *)skb->data;
727
Jeff Johnson80486862017-10-02 13:21:29 -0700728 ucfg_tdls_update_tx_pkt_cnt(adapter->hdd_vdev, mac_addr);
Kabilan Kannan1c1c4022017-04-06 22:49:26 -0700729
Mohit Khannab1dd1e82017-02-04 15:14:38 -0800730 if (qdf_nbuf_is_tso(skb))
Jeff Johnson80486862017-10-02 13:21:29 -0700731 adapter->stats.tx_packets += qdf_nbuf_get_tso_num_seg(skb);
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700732 else
Jeff Johnson80486862017-10-02 13:21:29 -0700733 ++adapter->stats.tx_packets;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800734
Nirav Shah5e74bb82016-07-20 16:01:27 +0530735 hdd_event_eapol_log(skb, QDF_TX);
Jeff Johnson1b780e42017-10-31 14:11:45 -0700736 pkt_proto_logged = qdf_dp_trace_log_pkt(adapter->session_id,
Mohit Khannaf8f96822017-05-17 17:11:59 -0700737 skb, QDF_TX,
738 QDF_TRACE_DEFAULT_PDEV_ID);
Nirav Shahcbc6d722016-03-01 16:24:53 +0530739 QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
740 QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800741
Nirav Shah0d58a7e2016-04-26 22:54:12 +0530742 qdf_dp_trace_set_track(skb, QDF_TX);
Mohit Khannaf8f96822017-05-17 17:11:59 -0700743
Nirav Shah0d58a7e2016-04-26 22:54:12 +0530744 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700745 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(skb),
746 sizeof(qdf_nbuf_data(skb)),
Himanshu Agarwalee3411a2017-01-31 12:56:47 +0530747 QDF_TX));
Mohit Khannaf8f96822017-05-17 17:11:59 -0700748 if (!pkt_proto_logged) {
Nirav Shah0d58a7e2016-04-26 22:54:12 +0530749 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
Mohit Khannaf8f96822017-05-17 17:11:59 -0700750 QDF_TRACE_DEFAULT_PDEV_ID, (uint8_t *)skb->data,
751 qdf_nbuf_len(skb), QDF_TX));
752 if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE) {
753 DPTRACE(qdf_dp_trace(skb,
754 QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
755 QDF_TRACE_DEFAULT_PDEV_ID,
756 (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
757 (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
758 QDF_TX));
759 }
Nirav Shah07e39a62016-04-25 17:46:40 +0530760 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800761
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800762 if (!hdd_is_tx_allowed(skb, STAId)) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530763 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800764 FL("Tx not allowed for sta_id: %d"), STAId);
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700765 ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530766 goto drop_pkt_and_release_skb;
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800767 }
768
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800769 /*
Ravi Joshi24477b72016-07-19 15:45:09 -0700770 * If a transmit function is not registered, drop packet
771 */
Jeff Johnson80486862017-10-02 13:21:29 -0700772 if (!adapter->tx_fn) {
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800773 QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
774 "%s: TX function not registered by the data path",
775 __func__);
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700776 ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530777 goto drop_pkt_and_release_skb;
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800778 }
779
Jeff Johnson80486862017-10-02 13:21:29 -0700780 if (adapter->tx_fn(adapter->txrx_vdev,
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800781 (qdf_nbuf_t) skb) != NULL) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530782 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Srinivas Girigowda028c4482017-03-09 18:52:02 -0800783 "%s: Failed to send packet to txrx for staid: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800784 __func__, STAId);
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700785 ++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530786 goto drop_pkt_and_release_skb;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800787 }
Dustin Browne0024fa2016-10-14 16:29:21 -0700788 netif_trans_update(dev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800789
790 return NETDEV_TX_OK;
791
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530792drop_pkt_and_release_skb:
793 qdf_net_buf_debug_release_skb(skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800794drop_pkt:
795
Nirav Shahdf3659e2016-06-27 12:26:28 +0530796 if (skb) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530797 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700798 QDF_TRACE_DEFAULT_PDEV_ID, (uint8_t *)skb->data,
799 qdf_nbuf_len(skb), QDF_TX));
Nirav Shahdf3659e2016-06-27 12:26:28 +0530800 if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
801 DPTRACE(qdf_dp_trace(skb,
802 QDF_DP_TRACE_DROP_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700803 QDF_TRACE_DEFAULT_PDEV_ID,
Nirav Shahdf3659e2016-06-27 12:26:28 +0530804 (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
805 (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
806 QDF_TX));
807
808 kfree_skb(skb);
809 }
810
811drop_pkt_accounting:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800812
Jeff Johnson80486862017-10-02 13:21:29 -0700813 ++adapter->stats.tx_dropped;
Jeff Johnson6ced42c2017-10-20 12:48:11 -0700814 ++adapter->hdd_stats.tx_rx_stats.tx_dropped;
Nirav Shahdf3659e2016-06-27 12:26:28 +0530815
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800816 return NETDEV_TX_OK;
817}
818
/**
 * hdd_hard_start_xmit() - SSR-protected wrapper for __hdd_hard_start_xmit()
 * @skb: pointer to OS packet (sk_buff)
 * @dev: pointer to net_device structure
 *
 * Entry point invoked by the network stack whenever a packet must be
 * transmitted. The real work is done by __hdd_hard_start_xmit(); this
 * wrapper only brackets it with SSR protect/unprotect so the driver
 * cannot be torn down while a transmit is in flight.
 *
 * Return: Always returns NETDEV_TX_OK
 */
int hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int status;

	cds_ssr_protect(__func__);
	status = __hdd_hard_start_xmit(skb, dev);
	cds_ssr_unprotect(__func__);

	return status;
}
839
840/**
Deepak Dhamdhere5872c8c2016-06-02 15:51:47 -0700841 * hdd_get_peer_sta_id() - Get the StationID using the Peer Mac address
Jeff Johnsond377dce2017-10-04 10:32:42 -0700842 * @sta_ctx: pointer to HDD Station Context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800843 * @pMacAddress: pointer to Peer Mac address
844 * @staID: pointer to returned Station Index
845 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530846 * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_FAILURE
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800847 */
848
Jeff Johnsond377dce2017-10-04 10:32:42 -0700849QDF_STATUS hdd_get_peer_sta_id(struct hdd_station_ctx *sta_ctx,
Anurag Chouhan6d760662016-02-20 16:05:43 +0530850 struct qdf_mac_addr *pMacAddress, uint8_t *staId)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800851{
852 uint8_t idx;
853
Naveen Rawatc45d1622016-07-05 12:20:09 -0700854 for (idx = 0; idx < MAX_PEERS; idx++) {
Jeff Johnsond377dce2017-10-04 10:32:42 -0700855 if (!qdf_mem_cmp(&sta_ctx->conn_info.peerMacAddress[idx],
Anurag Chouhan6d760662016-02-20 16:05:43 +0530856 pMacAddress, QDF_MAC_ADDR_SIZE)) {
Jeff Johnsond377dce2017-10-04 10:32:42 -0700857 *staId = sta_ctx->conn_info.staId[idx];
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530858 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800859 }
860 }
861
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530862 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800863}
864
/**
 * __hdd_tx_timeout() - TX timeout handler
 * @dev: pointer to network device
 *
 * This function is registered as a netdev ndo_tx_timeout method, and
 * is invoked by the kernel if the driver takes too long to transmit a
 * frame. It dumps per-queue/carrier diagnostics, tracks consecutive
 * timeouts, and raises a data-stall event once the streak crosses
 * HDD_TX_STALL_THRESHOLD.
 *
 * Return: None
 */
static void __hdd_tx_timeout(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	struct hdd_context *hdd_ctx;
	struct netdev_queue *txq;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	u64 diff_jiffies;
	int i = 0;

	TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
	DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_HDD_TX_TIMEOUT,
				QDF_TRACE_DEFAULT_PDEV_ID,
				NULL, 0, QDF_TX));

	/* Getting here implies we disabled the TX queues for too
	 * long. Queues are disabled either because of disassociation
	 * or low resource scenarios. In case of disassociation it is
	 * ok to ignore this. But if associated, we have do possible
	 * recovery here
	 */

	/* Log the stopped/running state of every TX queue for triage */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		txq = netdev_get_tx_queue(dev, i);
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
			  "Queue: %d status: %d txq->trans_start: %lu",
			  i, netif_tx_queue_stopped(txq), txq->trans_start);
	}

	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
		  "carrier state: %d", netif_carrier_ok(dev));
	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	wlan_hdd_display_netif_queue_history(hdd_ctx,
					     QDF_STATS_VERBOSITY_LEVEL_HIGH);
	cdp_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));

	/* count every timeout, plus the running streak of back-to-back ones */
	++adapter->hdd_stats.tx_rx_stats.tx_timeout_cnt;
	++adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt;

	/* jiffies elapsed since the previous recorded TX timeout */
	diff_jiffies = jiffies -
		adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout;

	if ((adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt > 1) &&
	    (diff_jiffies > (HDD_TX_TIMEOUT * 2))) {
		/*
		 * In case when there is no traffic is running, it may
		 * possible tx time-out may once happen and later system
		 * recovered then continuous tx timeout count has to be
		 * reset as it is gets modified only when traffic is running.
		 * If over a period of time if this count reaches to threshold
		 * then host triggers a false subsystem restart. In genuine
		 * time out case kernel will call the tx time-out back to back
		 * at interval of HDD_TX_TIMEOUT. Here now check if previous
		 * TX TIME out has occurred more than twice of HDD_TX_TIMEOUT
		 * back then host may recovered here from data stall.
		 */
		adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
			  "Reset continous tx timeout stat");
	}

	adapter->hdd_stats.tx_rx_stats.jiffies_last_txtimeout = jiffies;

	if (adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt >
	    HDD_TX_STALL_THRESHOLD) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "Data stall due to continuous TX timeouts");
		/* restart the streak so one stall raises one event */
		adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
		if (hdd_ctx->config->enable_data_stall_det)
			cdp_post_data_stall_event(soc,
					  DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
					  DATA_STALL_LOG_HOST_STA_TX_TIMEOUT,
					  0xFF, 0xFF,
					  DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
	}
}
950
/**
 * hdd_tx_timeout() - SSR-protected wrapper for __hdd_tx_timeout()
 * @dev: pointer to net_device structure
 *
 * Registered with the kernel as the ndo_tx_timeout handler. Since HDD
 * simply enqueues packets and returns control to the OS right away,
 * a genuine invocation indicates the TX queues were stopped too long.
 *
 * Return: none
 */
void hdd_tx_timeout(struct net_device *dev)
{
	cds_ssr_protect(__func__);
	__hdd_tx_timeout(dev);
	cds_ssr_unprotect(__func__);
}
967
968/**
969 * @hdd_init_tx_rx() - Initialize Tx/RX module
Jeff Johnson80486862017-10-02 13:21:29 -0700970 * @adapter: pointer to adapter context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800971 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530972 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
973 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800974 */
Jeff Johnson80486862017-10-02 13:21:29 -0700975QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800976{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530977 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800978
Jeff Johnson80486862017-10-02 13:21:29 -0700979 if (NULL == adapter) {
980 hdd_err("adapter is NULL");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530981 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530982 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800983 }
984
985 return status;
986}
987
988/**
989 * @hdd_deinit_tx_rx() - Deinitialize Tx/RX module
Jeff Johnson80486862017-10-02 13:21:29 -0700990 * @adapter: pointer to adapter context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800991 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530992 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
993 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800994 */
Jeff Johnson80486862017-10-02 13:21:29 -0700995QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800996{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530997 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800998
Jeff Johnson80486862017-10-02 13:21:29 -0700999 if (NULL == adapter) {
1000 hdd_err("adapter is NULL");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301001 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301002 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001003 }
1004
1005 return status;
1006}
1007
1008/**
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001009 * hdd_mon_rx_packet_cbk() - Receive callback registered with OL layer.
1010 * @context: [in] pointer to qdf context
1011 * @rxBuf: [in] pointer to rx qdf_nbuf
1012 *
1013 * TL will call this to notify the HDD when one or more packets were
1014 * received for a registered STA.
1015 *
1016 * Return: QDF_STATUS_E_FAILURE if any errors encountered, QDF_STATUS_SUCCESS
1017 * otherwise
1018 */
1019static QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
1020{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001021 struct hdd_adapter *adapter;
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001022 int rxstat;
1023 struct sk_buff *skb;
1024 struct sk_buff *skb_next;
1025 unsigned int cpu_index;
1026
1027 /* Sanity check on inputs */
1028 if ((NULL == context) || (NULL == rxbuf)) {
1029 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1030 "%s: Null params being passed", __func__);
1031 return QDF_STATUS_E_FAILURE;
1032 }
1033
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001034 adapter = (struct hdd_adapter *)context;
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001035 if ((NULL == adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
1036 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson36e74c42017-09-18 08:15:42 -07001037 "invalid adapter %pK", adapter);
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001038 return QDF_STATUS_E_FAILURE;
1039 }
1040
1041 cpu_index = wlan_hdd_get_cpu();
1042
1043 /* walk the chain until all are processed */
1044 skb = (struct sk_buff *) rxbuf;
1045 while (NULL != skb) {
1046 skb_next = skb->next;
1047 skb->dev = adapter->dev;
1048
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001049 ++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001050 ++adapter->stats.rx_packets;
1051 adapter->stats.rx_bytes += skb->len;
1052
1053 /* Remove SKB from internal tracking table before submitting
1054 * it to stack
1055 */
1056 qdf_net_buf_debug_release_skb(skb);
1057
1058 /*
1059 * If this is not a last packet on the chain
1060 * Just put packet into backlog queue, not scheduling RX sirq
1061 */
1062 if (skb->next) {
1063 rxstat = netif_rx(skb);
1064 } else {
1065 /*
1066 * This is the last packet on the chain
1067 * Scheduling rx sirq
1068 */
1069 rxstat = netif_rx_ni(skb);
1070 }
1071
1072 if (NET_RX_SUCCESS == rxstat)
1073 ++adapter->
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001074 hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001075 else
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001076 ++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001077
1078 skb = skb_next;
1079 }
1080
Manjunathappa Prakash59f861d2016-04-21 10:33:31 -07001081 return QDF_STATUS_SUCCESS;
1082}
1083
1084/**
Naveen Rawatf28315c2016-06-29 18:06:02 -07001085 * hdd_get_peer_idx() - Get the idx for given address in peer table
1086 * @sta_ctx: pointer to HDD Station Context
1087 * @addr: pointer to Peer Mac address
1088 *
1089 * Return: index when success else INVALID_PEER_IDX
1090 */
Jeff Johnson811f47d2017-10-03 11:33:09 -07001091int hdd_get_peer_idx(struct hdd_station_ctx *sta_ctx,
1092 struct qdf_mac_addr *addr)
Naveen Rawatf28315c2016-06-29 18:06:02 -07001093{
1094 uint8_t idx;
1095
Naveen Rawatc45d1622016-07-05 12:20:09 -07001096 for (idx = 0; idx < MAX_PEERS; idx++) {
Naveen Rawatac027cb2017-04-27 15:02:42 -07001097 if (sta_ctx->conn_info.staId[idx] == HDD_WLAN_INVALID_STA_ID)
Naveen Rawatf28315c2016-06-29 18:06:02 -07001098 continue;
1099 if (qdf_mem_cmp(&sta_ctx->conn_info.peerMacAddress[idx],
1100 addr, sizeof(struct qdf_mac_addr)))
1101 continue;
1102 return idx;
1103 }
1104
1105 return INVALID_PEER_IDX;
1106}
1107
Ravi Joshibb8d4512016-08-22 10:14:52 -07001108/*
1109 * hdd_is_mcast_replay() - checks if pkt is multicast replay
1110 * @skb: packet skb
1111 *
1112 * Return: true if replayed multicast pkt, false otherwise
1113 */
1114static bool hdd_is_mcast_replay(struct sk_buff *skb)
1115{
1116 struct ethhdr *eth;
1117
1118 eth = eth_hdr(skb);
1119 if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
1120 if (unlikely(ether_addr_equal(eth->h_source,
1121 skb->dev->dev_addr)))
1122 return true;
1123 }
1124 return false;
1125}
1126
/**
 * hdd_is_arp_local() - check if local or non local arp
 * @skb: pointer to sk_buff
 *
 * Parses an ARP request carried by @skb and compares its target IP
 * against the IPv4 address configured on the receiving net_device.
 *
 * Return: true if local arp or false otherwise.
 */
static bool hdd_is_arp_local(struct sk_buff *skb)
{
	struct arphdr *arp;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	struct in_device *in_dev;
	unsigned char *arp_ptr;
	__be32 tip;

	/* ARP header sits at the start of the (pulled) skb data */
	arp = (struct arphdr *)skb->data;
	if (arp->ar_op == htons(ARPOP_REQUEST)) {
		in_dev = __in_dev_get_rtnl(skb->dev);
		if (in_dev) {
			/* find the ifaddr entry labelled with this device */
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
				ifap = &ifa->ifa_next) {
				if (!strcmp(skb->dev->name, ifa->ifa_label))
					break;
			}
		}

		if (ifa && ifa->ifa_local) {
			/*
			 * Payload after the arphdr is: sender HW addr,
			 * sender IP (4 bytes), target HW addr, target IP.
			 * Skip the first three fields to reach target IP.
			 */
			arp_ptr = (unsigned char *)(arp + 1);
			arp_ptr += (skb->dev->addr_len + 4 +
					skb->dev->addr_len);
			/* memcpy: target IP may not be 4-byte aligned */
			memcpy(&tip, arp_ptr, 4);
			hdd_debug("ARP packet: local IP: %x dest IP: %x",
				ifa->ifa_local, tip);
			if (ifa->ifa_local == tip)
				return true;
		}
	}

	return false;
}
1167
1168/**
Rajeev Kumaref3a3362017-05-07 20:11:16 -07001169 * hdd_is_rx_wake_lock_needed() - check if wake lock is needed
1170 * @skb: pointer to sk_buff
1171 *
1172 * RX wake lock is needed for:
1173 * 1) Unicast data packet OR
1174 * 2) Local ARP data packet
1175 *
1176 * Return: true if wake lock is needed or false otherwise.
1177 */
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301178static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb)
1179{
1180 if ((skb->pkt_type != PACKET_BROADCAST &&
1181 skb->pkt_type != PACKET_MULTICAST) || hdd_is_arp_local(skb))
1182 return true;
1183
1184 return false;
1185}
1186
Yu Wang66a250b2017-07-19 11:46:40 +08001187#ifdef WLAN_FEATURE_TSF_PLUS
1188static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
1189 qdf_nbuf_t netbuf,
1190 uint64_t target_time)
1191{
1192 if (!HDD_TSF_IS_RX_SET(hdd_ctx))
1193 return;
1194
1195 hdd_rx_timestamp(netbuf, target_time);
1196}
1197#else
1198static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
1199 qdf_nbuf_t netbuf,
1200 uint64_t target_time)
1201{
1202}
1203#endif
1204
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301205/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001206 * hdd_rx_packet_cbk() - Receive packet handler
Dhanashri Atre182b0272016-02-17 15:35:07 -08001207 * @context: pointer to HDD context
Nirav Shahcbc6d722016-03-01 16:24:53 +05301208 * @rxBuf: pointer to rx qdf_nbuf
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001209 *
1210 * Receive callback registered with TL. TL will call this to notify
1211 * the HDD when one or more packets were received for a registered
1212 * STA.
1213 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301214 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
1215 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001216 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08001217QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001218{
Jeff Johnson80486862017-10-02 13:21:29 -07001219 struct hdd_adapter *adapter = NULL;
Jeff Johnsoncc011972017-09-03 09:26:36 -07001220 struct hdd_context *hdd_ctx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001221 int rxstat;
1222 struct sk_buff *skb = NULL;
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001223 struct sk_buff *next = NULL;
Jeff Johnsond377dce2017-10-04 10:32:42 -07001224 struct hdd_station_ctx *sta_ctx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001225 unsigned int cpu_index;
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07001226 struct qdf_mac_addr *mac_addr;
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301227 bool wake_lock = false;
Mohit Khannaf8f96822017-05-17 17:11:59 -07001228 bool proto_pkt_logged = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001229
1230 /* Sanity check on inputs */
Dhanashri Atre182b0272016-02-17 15:35:07 -08001231 if (unlikely((NULL == context) || (NULL == rxBuf))) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301232 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001233 "%s: Null params being passed", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301234 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001235 }
1236
Jeff Johnson80486862017-10-02 13:21:29 -07001237 adapter = (struct hdd_adapter *)context;
1238 if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
Srinivas Girigowda028c4482017-03-09 18:52:02 -08001239 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001240 "Magic cookie(%x) for adapter sanity verification is invalid",
Jeff Johnson80486862017-10-02 13:21:29 -07001241 adapter->magic);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301242 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001243 }
1244
Jeff Johnson80486862017-10-02 13:21:29 -07001245 hdd_ctx = WLAN_HDD_GET_CTX(adapter);
Jeff Johnsoncc011972017-09-03 09:26:36 -07001246 if (unlikely(NULL == hdd_ctx)) {
Dhanashri Atre182b0272016-02-17 15:35:07 -08001247 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1248 "%s: HDD context is Null", __func__);
1249 return QDF_STATUS_E_FAILURE;
1250 }
1251
1252 cpu_index = wlan_hdd_get_cpu();
1253
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001254 next = (struct sk_buff *)rxBuf;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001255
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001256 while (next) {
1257 skb = next;
1258 next = skb->next;
Dhanashri Atre63d98022017-01-24 18:22:09 -08001259 skb->next = NULL;
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001260
psimha884025c2017-08-01 15:07:32 -07001261#ifdef QCA_WIFI_QCA6290 /* Debug code, remove later */
Venkata Sharath Chandra Manchala9bf41ff2017-08-31 00:50:06 -07001262 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnson36e74c42017-09-18 08:15:42 -07001263 "%s: skb %pK skb->len %d\n", __func__, skb, skb->len);
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001264#endif
1265
Jeff Johnsond377dce2017-10-04 10:32:42 -07001266 sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
1267 if ((sta_ctx->conn_info.proxyARPService) &&
Dhanashri Atre63d98022017-01-24 18:22:09 -08001268 cfg80211_is_gratuitous_arp_unsolicited_na(skb)) {
Poddar, Siddarth37a17d32017-08-09 19:04:39 +05301269 uint32_t rx_dropped;
1270
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001271 rx_dropped = ++adapter->hdd_stats.tx_rx_stats.
1272 rx_dropped[cpu_index];
Poddar, Siddarth37a17d32017-08-09 19:04:39 +05301273 /* rate limit error messages to 1/8th */
1274 if ((rx_dropped & 0x07) == 0)
1275 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
1276 QDF_TRACE_LEVEL_INFO,
1277 "%s: Dropping HS 2.0 Gratuitous ARP or Unsolicited NA count=%u",
1278 __func__, rx_dropped);
Dhanashri Atre63d98022017-01-24 18:22:09 -08001279 /* Remove SKB from internal tracking table before submitting
1280 * it to stack
1281 */
1282 qdf_nbuf_free(skb);
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001283 continue;
Dhanashri Atre63d98022017-01-24 18:22:09 -08001284 }
1285
1286 hdd_event_eapol_log(skb, QDF_RX);
Jeff Johnson1b780e42017-10-31 14:11:45 -07001287 proto_pkt_logged = qdf_dp_trace_log_pkt(adapter->session_id,
Mohit Khannaf8f96822017-05-17 17:11:59 -07001288 skb, QDF_RX,
1289 QDF_TRACE_DEFAULT_PDEV_ID);
1290
Dhanashri Atre63d98022017-01-24 18:22:09 -08001291 DPTRACE(qdf_dp_trace(skb,
1292 QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001293 QDF_TRACE_DEFAULT_PDEV_ID,
Dhanashri Atre63d98022017-01-24 18:22:09 -08001294 qdf_nbuf_data_addr(skb),
1295 sizeof(qdf_nbuf_data(skb)), QDF_RX));
Mohit Khannaf8f96822017-05-17 17:11:59 -07001296
1297 if (!proto_pkt_logged) {
Himanshu Agarwalee3411a2017-01-31 12:56:47 +05301298 DPTRACE(qdf_dp_trace(skb,
1299 QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001300 QDF_TRACE_DEFAULT_PDEV_ID,
Mohit Khannaf8f96822017-05-17 17:11:59 -07001301 (uint8_t *)skb->data, qdf_nbuf_len(skb),
Himanshu Agarwalee3411a2017-01-31 12:56:47 +05301302 QDF_RX));
Mohit Khannaf8f96822017-05-17 17:11:59 -07001303 if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
1304 DPTRACE(qdf_dp_trace(skb,
1305 QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
1306 QDF_TRACE_DEFAULT_PDEV_ID,
1307 (uint8_t *)
1308 &skb->data[QDF_DP_TRACE_RECORD_SIZE],
1309 (qdf_nbuf_len(skb) -
1310 QDF_DP_TRACE_RECORD_SIZE),
1311 QDF_RX));
1312 }
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07001313 mac_addr = (struct qdf_mac_addr *)(skb->data+QDF_MAC_ADDR_SIZE);
1314
Jeff Johnson80486862017-10-02 13:21:29 -07001315 ucfg_tdls_update_rx_pkt_cnt(adapter->hdd_vdev, mac_addr);
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07001316
Jeff Johnson80486862017-10-02 13:21:29 -07001317 skb->dev = adapter->dev;
Dhanashri Atre63d98022017-01-24 18:22:09 -08001318 skb->protocol = eth_type_trans(skb, skb->dev);
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001319 ++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
Jeff Johnson80486862017-10-02 13:21:29 -07001320 ++adapter->stats.rx_packets;
1321 adapter->stats.rx_bytes += skb->len;
Dhanashri Atre63d98022017-01-24 18:22:09 -08001322
1323 /* Check & drop replayed mcast packets (for IPV6) */
Jeff Johnsoncc011972017-09-03 09:26:36 -07001324 if (hdd_ctx->config->multicast_replay_filter &&
Dhanashri Atre63d98022017-01-24 18:22:09 -08001325 hdd_is_mcast_replay(skb)) {
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001326 ++adapter->hdd_stats.tx_rx_stats.rx_dropped[cpu_index];
Srinivas Girigowda028c4482017-03-09 18:52:02 -08001327 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Dhanashri Atre63d98022017-01-24 18:22:09 -08001328 "%s: Dropping multicast replay pkt", __func__);
1329 qdf_nbuf_free(skb);
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001330 continue;
Dhanashri Atre63d98022017-01-24 18:22:09 -08001331 }
1332
1333 /* hold configurable wakelock for unicast traffic */
Jeff Johnsoncc011972017-09-03 09:26:36 -07001334 if (hdd_ctx->config->rx_wakelock_timeout &&
Jeff Johnsond377dce2017-10-04 10:32:42 -07001335 sta_ctx->conn_info.uIsAuthenticated)
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301336 wake_lock = hdd_is_rx_wake_lock_needed(skb);
1337
1338 if (wake_lock) {
Jeff Johnsoncc011972017-09-03 09:26:36 -07001339 cds_host_diag_log_work(&hdd_ctx->rx_wake_lock,
1340 hdd_ctx->config->rx_wakelock_timeout,
Dhanashri Atre63d98022017-01-24 18:22:09 -08001341 WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
Jeff Johnsoncc011972017-09-03 09:26:36 -07001342 qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock,
1343 hdd_ctx->config->
Dhanashri Atre63d98022017-01-24 18:22:09 -08001344 rx_wakelock_timeout);
1345 }
1346
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001347 /* Remove SKB from internal tracking table before submitting
1348 * it to stack
1349 */
Dhanashri Atre63d98022017-01-24 18:22:09 -08001350 qdf_net_buf_debug_release_skb(skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001351
Yu Wang66a250b2017-07-19 11:46:40 +08001352 hdd_tsf_timestamp_rx(hdd_ctx, skb, ktime_to_us(skb->tstamp));
1353
Dhanashri Atre63d98022017-01-24 18:22:09 -08001354 if (HDD_LRO_NO_RX ==
Jeff Johnson80486862017-10-02 13:21:29 -07001355 hdd_lro_rx(hdd_ctx, adapter, skb)) {
Dhanashri Atre63d98022017-01-24 18:22:09 -08001356 if (hdd_napi_enabled(HDD_NAPI_ANY) &&
Jeff Johnsone2ba3cd2017-10-30 20:02:09 -07001357 !hdd_ctx->enable_rxthread &&
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05301358 !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
Dhanashri Atre63d98022017-01-24 18:22:09 -08001359 rxstat = netif_receive_skb(skb);
1360 else
1361 rxstat = netif_rx_ni(skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001362
Dhanashri Atre63d98022017-01-24 18:22:09 -08001363 if (NET_RX_SUCCESS == rxstat)
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001364 ++adapter->hdd_stats.tx_rx_stats.
1365 rx_delivered[cpu_index];
Dhanashri Atre63d98022017-01-24 18:22:09 -08001366 else
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001367 ++adapter->hdd_stats.tx_rx_stats.
1368 rx_refused[cpu_index];
Dhanashri Atre63d98022017-01-24 18:22:09 -08001369 } else {
Jeff Johnson6ced42c2017-10-20 12:48:11 -07001370 ++adapter->hdd_stats.tx_rx_stats.
1371 rx_delivered[cpu_index];
Dhanashri Atre63d98022017-01-24 18:22:09 -08001372 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001373 }
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001374
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301375 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001376}
1377
/**
 * hdd_reason_type_to_string() - return string conversion of reason type
 * @reason: reason type
 *
 * This utility function helps log string conversion of reason type.
 *
 * Return: string conversion of reason type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_reason_type_to_string(enum netif_reason_type reason)
{
	switch (reason) {
	CASE_RETURN_STRING(WLAN_CONTROL_PATH);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL);
	CASE_RETURN_STRING(WLAN_FW_PAUSE);
	CASE_RETURN_STRING(WLAN_TX_ABORT);
	CASE_RETURN_STRING(WLAN_VDEV_STOP);
	CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
	CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL_PRIORITY);
	default:
		return "Invalid";
	}
}
1402
/**
 * hdd_action_type_to_string() - return string conversion of action type
 * @action: action type
 *
 * This utility function helps log string conversion of action_type.
 *
 * Return: string conversion of action type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_action_type_to_string(enum netif_action_type action)
{

	switch (action) {
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_WAKE_NON_PRIORITY_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_NON_PRIORITY_QUEUE);
	default:
		return "Invalid";
	}
}
1433
1434/**
1435 * wlan_hdd_update_queue_oper_stats - update queue operation statistics
1436 * @adapter: adapter handle
1437 * @action: action type
1438 * @reason: reason type
1439 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001440static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001441 enum netif_action_type action, enum netif_reason_type reason)
1442{
1443 switch (action) {
1444 case WLAN_STOP_ALL_NETIF_QUEUE:
1445 case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
Rakesh Pillai3e534db2017-09-26 18:59:43 +05301446 case WLAN_NETIF_PRIORITY_QUEUE_OFF:
1447 case WLAN_STOP_NON_PRIORITY_QUEUE:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001448 adapter->queue_oper_stats[reason].pause_count++;
1449 break;
1450 case WLAN_START_ALL_NETIF_QUEUE:
1451 case WLAN_WAKE_ALL_NETIF_QUEUE:
1452 case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
Rakesh Pillai3e534db2017-09-26 18:59:43 +05301453 case WLAN_NETIF_PRIORITY_QUEUE_ON:
1454 case WLAN_WAKE_NON_PRIORITY_QUEUE:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001455 adapter->queue_oper_stats[reason].unpause_count++;
1456 break;
1457 default:
1458 break;
1459 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001460}
1461
/**
 * hdd_netdev_queue_is_locked()
 * @txq: net device tx queue
 *
 * For SMP system, always return false and we could safely rely on
 * __netif_tx_trylock().
 *
 * Return: true locked; false not locked
 */
#ifdef QCA_CONFIG_SMP
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	/* On SMP, __netif_tx_trylock() alone detects recursion safely,
	 * so no pre-check of the lock owner is required.
	 */
	return false;
}
#else
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	/* On UP, xmit_lock_owner is -1 when the queue lock is free; any
	 * other value means it is already held (possibly by this CPU,
	 * i.e. recursion from dev_queue_xmit() context).
	 */
	return txq->xmit_lock_owner != -1;
}
#endif
1482
1483/**
Nirav Shah89223f72016-03-01 18:10:38 +05301484 * wlan_hdd_update_txq_timestamp() - update txq timestamp
1485 * @dev: net device
1486 *
1487 * Return: none
1488 */
Jeff Johnson3ae708d2016-10-05 15:45:00 -07001489static void wlan_hdd_update_txq_timestamp(struct net_device *dev)
Nirav Shah89223f72016-03-01 18:10:38 +05301490{
1491 struct netdev_queue *txq;
1492 int i;
Nirav Shah89223f72016-03-01 18:10:38 +05301493
1494 for (i = 0; i < NUM_TX_QUEUES; i++) {
1495 txq = netdev_get_tx_queue(dev, i);
jiad5b986632017-08-04 11:59:20 +08001496
1497 /*
1498 * On UP system, kernel will trigger watchdog bite if spinlock
1499 * recursion is detected. Unfortunately recursion is possible
1500 * when it is called in dev_queue_xmit() context, where stack
1501 * grabs the lock before calling driver's ndo_start_xmit
1502 * callback.
1503 */
1504 if (!hdd_netdev_queue_is_locked(txq)) {
1505 if (__netif_tx_trylock(txq)) {
1506 txq_trans_update(txq);
1507 __netif_tx_unlock(txq);
1508 }
wadesongba6373e2017-05-15 20:59:05 +08001509 }
Nirav Shah89223f72016-03-01 18:10:38 +05301510 }
1511}
1512
1513/**
Nirav Shah617cff92016-04-25 10:24:24 +05301514 * wlan_hdd_update_unpause_time() - update unpause time
1515 * @adapter: adapter handle
1516 *
1517 * Return: none
1518 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001519static void wlan_hdd_update_unpause_time(struct hdd_adapter *adapter)
Nirav Shah617cff92016-04-25 10:24:24 +05301520{
1521 qdf_time_t curr_time = qdf_system_ticks();
1522
1523 adapter->total_unpause_time += curr_time - adapter->last_time;
1524 adapter->last_time = curr_time;
1525}
1526
1527/**
1528 * wlan_hdd_update_pause_time() - update pause time
1529 * @adapter: adapter handle
1530 *
1531 * Return: none
1532 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001533static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
Nirav Shahda008342016-05-17 18:50:40 +05301534 uint32_t temp_map)
Nirav Shah617cff92016-04-25 10:24:24 +05301535{
1536 qdf_time_t curr_time = qdf_system_ticks();
Nirav Shahda008342016-05-17 18:50:40 +05301537 uint8_t i;
1538 qdf_time_t pause_time;
Nirav Shah617cff92016-04-25 10:24:24 +05301539
Nirav Shahda008342016-05-17 18:50:40 +05301540 pause_time = curr_time - adapter->last_time;
1541 adapter->total_pause_time += pause_time;
Nirav Shah617cff92016-04-25 10:24:24 +05301542 adapter->last_time = curr_time;
Nirav Shahda008342016-05-17 18:50:40 +05301543
1544 for (i = 0; i < WLAN_REASON_TYPE_MAX; i++) {
1545 if (temp_map & (1 << i)) {
1546 adapter->queue_oper_stats[i].total_pause_time +=
1547 pause_time;
1548 break;
1549 }
1550 }
1551
Nirav Shah617cff92016-04-25 10:24:24 +05301552}
1553
1554/**
Rakesh Pillai3e534db2017-09-26 18:59:43 +05301555 * wlan_hdd_stop_non_priority_queue() - stop non prority queues
1556 * @adapter: adapter handle
1557 *
1558 * Return: None
1559 */
1560static inline void wlan_hdd_stop_non_priority_queue(struct hdd_adapter *adapter)
1561{
1562 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
1563 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
1564 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
1565 netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
1566}
1567
1568/**
1569 * wlan_hdd_wake_non_priority_queue() - wake non prority queues
1570 * @adapter: adapter handle
1571 *
1572 * Return: None
1573 */
1574static inline void wlan_hdd_wake_non_priority_queue(struct hdd_adapter *adapter)
1575{
1576 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
1577 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
1578 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BE);
1579 netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BK);
1580}
1581
/**
 * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * This is single function which is used for netif_queue related
 * actions like start/stop of network queues and on/off carrier
 * option.
 *
 * pause_map is a bitmap of netif_reason_type bits; queues are actually
 * stopped only on the first pause (map transitions 0 -> nonzero) and
 * restarted only when the last reason clears (map returns to 0).  All
 * map updates and pause-time bookkeeping happen under pause_map_lock.
 *
 * Return: None
 */
void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	uint32_t temp_map;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) ||
	    (!adapter->dev)) {
		hdd_err("adapter is invalid");
		return;
	}

	switch (action) {

	case WLAN_NETIF_CARRIER_ON:
		netif_carrier_on(adapter->dev);
		break;

	case WLAN_NETIF_CARRIER_OFF:
		netif_carrier_off(adapter->dev);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		/* only the first pause reason actually stops the queues */
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			/* refresh trans_start so the watchdog stays quiet */
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			wlan_hdd_stop_non_priority_queue(adapter);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_ON:
		/* NOTE(review): unlike the *_ALL_* cases, the priority queue
		 * is toggled unconditionally (no !pause_map gating) —
		 * presumably intentional since it is managed independently;
		 * confirm against the flow-control design.
		 */
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_pause_time(adapter, temp_map);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
		wlan_hdd_update_txq_timestamp(adapter->dev);
		wlan_hdd_update_unpause_time(adapter);
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		/* restart only once every pause reason has cleared */
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_wake_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			wlan_hdd_wake_non_priority_queue(adapter);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		/* carrier change is not refcounted by pause_map */
		netif_carrier_off(adapter->dev);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_carrier_on(adapter->dev);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	default:
		hdd_err("unsupported action %d", action);
	}

	/* re-pause if the peer-unauthorised reason is still outstanding */
	spin_lock_bh(&adapter->pause_map_lock);
	if (adapter->pause_map & (1 << WLAN_PEER_UNAUTHORISED))
		wlan_hdd_process_peer_unauthorised_pause(adapter);
	spin_unlock_bh(&adapter->pause_map_lock);

	wlan_hdd_update_queue_oper_stats(adapter, action, reason);

	/* record the operation in the circular debug history */
	adapter->queue_oper_history[adapter->history_index].time =
						qdf_system_ticks();
	adapter->queue_oper_history[adapter->history_index].netif_action =
						action;
	adapter->queue_oper_history[adapter->history_index].netif_reason =
						reason;
	adapter->queue_oper_history[adapter->history_index].pause_map =
						adapter->pause_map;
	if (++adapter->history_index == WLAN_HDD_MAX_HISTORY_ENTRY)
		adapter->history_index = 0;
}
1734
/**
 * hdd_set_mon_rx_cb() - Set Monitor mode Rx callback
 * @dev: Pointer to net_device structure
 *
 * Registers the monitor-mode rx handlers with the txrx (cdp) vdev,
 * registers the self peer, and creates an SME monitor session for the
 * adapter's MAC address.
 *
 * Return: 0 for success; non-zero for failure
 */
int hdd_set_mon_rx_cb(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	struct hdd_context *hdd_ctx =  WLAN_HDD_GET_CTX(adapter);
	int ret;
	QDF_STATUS qdf_status;
	/* NOTE(review): sta_desc is registered zero-initialized —
	 * presumably the cdp layer fills/ignores the descriptor fields for
	 * the monitor self-peer; confirm against cdp_peer_register().
	 */
	struct ol_txrx_desc_type sta_desc = {0};
	struct ol_txrx_ops txrx_ops;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	/* hook monitor-mode rx delivery into the txrx vdev ops */
	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
	txrx_ops.rx.rx = hdd_mon_rx_packet_cbk;
	hdd_monitor_set_rx_monitor_cb(&txrx_ops, hdd_rx_monitor_callback);
	cdp_vdev_register(soc,
		(struct cdp_vdev *)cdp_get_vdev_from_vdev_id(soc,
		(struct cdp_pdev *)pdev, adapter->session_id),
		adapter, &txrx_ops);
	/* peer is created wma_vdev_attach->wma_create_peer */
	qdf_status = cdp_peer_register(soc,
			(struct cdp_pdev *)pdev, &sta_desc);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		hdd_err("cdp_peer_register() failed to register. Status= %d [0x%08X]",
			qdf_status, qdf_status);
		goto exit;
	}

	/* failure here is logged but not treated as fatal: qdf_status is
	 * still converted and returned to the caller below
	 */
	qdf_status = sme_create_mon_session(hdd_ctx->hHal,
					adapter->mac_addr.bytes);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		hdd_err("sme_create_mon_session() failed to register. Status= %d [0x%08X]",
			qdf_status, qdf_status);
	}
exit:
	ret = qdf_status_to_os_return(qdf_status);
	return ret;
}
Nirav Shahbd36b062016-07-18 11:12:59 +05301778
/**
 * hdd_send_rps_ind() - send rps indication to daemon
 * @adapter: adapter context
 *
 * If RPS feature enabled by INI, send RPS enable indication to daemon
 * Indication contents is the name of interface to find correct sysfs node
 * Should send all available interfaces
 *
 * On any configuration error the function falls through to the err label,
 * which clears cds_cfg->rps_enabled so the driver falls back to rx_thread.
 *
 * Return: none
 */
void hdd_send_rps_ind(struct hdd_adapter *adapter)
{
	int i;
	uint8_t cpu_map_list_len = 0;
	struct hdd_context *hdd_ctxt = NULL;
	struct wlan_rps_data rps_data;
	struct cds_config_info *cds_cfg;

	cds_cfg = cds_get_ini_config();

	if (!adapter) {
		hdd_err("adapter is NULL");
		return;
	}

	if (!cds_cfg) {
		hdd_err("cds_cfg is NULL");
		return;
	}

	hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
	rps_data.num_queues = NUM_TX_QUEUES;

	hdd_info("cpu_map_list '%s'", hdd_ctxt->config->cpu_map_list);

	/* in case no cpu map list is provided, simply return */
	if (!strlen(hdd_ctxt->config->cpu_map_list)) {
		hdd_err("no cpu map list found");
		goto err;
	}

	/* parse the INI hex string into per-queue cpu masks */
	if (QDF_STATUS_SUCCESS !=
		hdd_hex_string_to_u16_array(hdd_ctxt->config->cpu_map_list,
				rps_data.cpu_map_list,
				&cpu_map_list_len,
				WLAN_SVC_IFACE_NUM_QUEUES)) {
		hdd_err("invalid cpu map list");
		goto err;
	}

	/* advertise no more queues than the INI actually configured */
	rps_data.num_queues =
		(cpu_map_list_len < rps_data.num_queues) ?
				cpu_map_list_len : rps_data.num_queues;

	for (i = 0; i < rps_data.num_queues; i++) {
		hdd_info("cpu_map_list[%d] = 0x%x",
			i, rps_data.cpu_map_list[i]);
	}

	strlcpy(rps_data.ifname, adapter->dev->name,
			sizeof(rps_data.ifname));
	wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
				WLAN_SVC_RPS_ENABLE_IND,
				&rps_data, sizeof(rps_data));

	cds_cfg->rps_enabled = true;

	return;

err:
	hdd_err("Wrong RPS configuration. enabling rx_thread");
	cds_cfg->rps_enabled = false;
}
1852
1853/**
1854 * hdd_send_rps_disable_ind() - send rps disable indication to daemon
1855 * @adapter: adapter context
1856 *
1857 * Return: none
1858 */
1859void hdd_send_rps_disable_ind(struct hdd_adapter *adapter)
1860{
1861 uint8_t cpu_map_list_len = 0;
1862 struct hdd_context *hdd_ctxt = NULL;
1863 struct wlan_rps_data rps_data;
1864 struct cds_config_info *cds_cfg;
1865
1866 cds_cfg = cds_get_ini_config();
1867
1868 if (!adapter) {
1869 hdd_err("adapter is NULL");
1870 return;
1871 }
1872
1873 if (!cds_cfg) {
1874 hdd_err("cds_cfg is NULL");
1875 return;
1876 }
1877
1878 hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
1879 rps_data.num_queues = NUM_TX_QUEUES;
1880
1881 hdd_info("Set cpu_map_list 0");
1882
1883 qdf_mem_zero(&rps_data.cpu_map_list, sizeof(rps_data.cpu_map_list));
1884 cpu_map_list_len = 0;
1885 rps_data.num_queues =
1886 (cpu_map_list_len < rps_data.num_queues) ?
1887 cpu_map_list_len : rps_data.num_queues;
1888
1889 strlcpy(rps_data.ifname, adapter->dev->name, sizeof(rps_data.ifname));
1890 wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
1891 WLAN_SVC_RPS_ENABLE_IND,
1892 &rps_data, sizeof(rps_data));
1893
1894 cds_cfg->rps_enabled = false;
Nirav Shahbd36b062016-07-18 11:12:59 +05301895}
1896
Varun Reddy Yeturu076eaa82018-01-16 12:16:14 -08001897void hdd_tx_queue_cb(void *context, uint32_t vdev_id,
1898 enum netif_action_type action,
1899 enum netif_reason_type reason)
1900{
1901 struct hdd_context *hdd_ctx = (struct hdd_context *)context;
1902 struct hdd_adapter *adapter = NULL;
1903
1904 /*
1905 * Validating the context is not required here.
1906 * if there is a driver unload/SSR in progress happening in a
1907 * different context and it has been scheduled to run and
1908 * driver got a firmware event of sta kick out, then it is
1909 * good to disable the Tx Queue to stop the influx of traffic.
1910 */
1911 if (hdd_ctx == NULL) {
1912 hdd_err("Invalid context passed");
1913 return;
1914 }
1915
1916 adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
1917 if (adapter == NULL) {
1918 hdd_err("vdev_id %d does not exist with host", vdev_id);
1919 return;
1920 }
1921 hdd_debug("Tx Queue action %d on vdev %d", action, vdev_id);
1922
1923 wlan_hdd_netif_queue_control(adapter, action, reason);
1924}
1925
Ravi Joshib89e7f72016-09-07 13:43:15 -07001926#ifdef MSM_PLATFORM
1927/**
1928 * hdd_reset_tcp_delack() - Reset tcp delack value to default
1929 * @hdd_ctx: Handle to hdd context
1930 *
1931 * Function used to reset TCP delack value to its default value
1932 *
1933 * Return: None
1934 */
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07001935void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx)
Ravi Joshib89e7f72016-09-07 13:43:15 -07001936{
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07001937 enum pld_bus_width_type next_level = WLAN_SVC_TP_LOW;
1938 struct wlan_rx_tp_data rx_tp_data = {0};
Nirav Shahbd36b062016-07-18 11:12:59 +05301939
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07001940 rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07001941 rx_tp_data.level = next_level;
Ravi Joshib89e7f72016-09-07 13:43:15 -07001942 hdd_ctx->rx_high_ind_cnt = 0;
1943 wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index, WLAN_SVC_WLAN_TP_IND,
Manjunathappa Prakashc13cb5b2017-10-09 01:47:07 -07001944 &rx_tp_data, sizeof(rx_tp_data));
Ravi Joshib89e7f72016-09-07 13:43:15 -07001945}
1946#endif /* MSM_PLATFORM */