blob: 24e518eb0eaba6ed44e650af821a8f28540c4dd7 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Poddar, Siddarth74c67192017-01-04 12:31:27 +05302 * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wlan_hdd_tx_rx.c
30 *
31 * Linux HDD Tx/RX APIs
32 */
33
Jeff Johnsona0399642016-12-05 12:39:59 -080034/* denote that this file does not allow legacy hddLog */
35#define HDD_DISALLOW_LEGACY_HDDLOG 1
36
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080037#include <wlan_hdd_tx_rx.h>
38#include <wlan_hdd_softap_tx_rx.h>
39#include <wlan_hdd_napi.h>
40#include <linux/netdevice.h>
41#include <linux/skbuff.h>
42#include <linux/etherdevice.h>
Ravi Joshibb8d4512016-08-22 10:14:52 -070043#include <linux/if_ether.h>
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +053044#include <linux/inetdevice.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080045#include <cds_sched.h>
Manjunathappa Prakash779e4862016-09-12 17:00:11 -070046#include <cds_utils.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080047
48#include <wlan_hdd_p2p.h>
49#include <linux/wireless.h>
50#include <net/cfg80211.h>
51#include <net/ieee80211_radiotap.h>
52#include "sap_api.h"
53#include "wlan_hdd_wmm.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080054#include "wlan_hdd_tdls.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080055#include <wlan_hdd_ipa.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080056#include "wlan_hdd_ocb.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080057#include "wlan_hdd_lro.h"
Leo Changfdb45c32016-10-28 11:09:23 -070058#include <cdp_txrx_cmn.h>
59#include <cdp_txrx_peer_ops.h>
60#include <cdp_txrx_flow_ctrl_v2.h>
Deepak Dhamdhere5872c8c2016-06-02 15:51:47 -070061#include "wlan_hdd_nan_datapath.h"
Ravi Joshib89e7f72016-09-07 13:43:15 -070062#include "pld_common.h"
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080063#include <cdp_txrx_handle.h>
Ravi Joshi106ffe02017-01-18 18:09:05 -080064#include "wlan_hdd_rx_monitor.h"
Zhu Jianmin04392c42017-05-12 16:34:53 +080065#include "wlan_hdd_power.h"
Yu Wangceb357b2017-06-01 12:04:18 +080066#include <wlan_hdd_tsf.h>
Ravi Joshi106ffe02017-01-18 18:09:05 -080067
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
 * Mapping Linux AC interpretation to SME AC.
 * Host has 5 tx queues, 4 flow-controlled queues for regular traffic and
 * one non-flow-controlled queue for high priority control traffic(EOPOL, DHCP).
 * The fifth queue is mapped to AC_VO to allow for proper prioritization.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,
	SME_AC_VI,
	SME_AC_BE,
	SME_AC_BK,
	SME_AC_VO,
};

#else
/*
 * Without flow control v2 there is no dedicated high-priority queue, so
 * only the four standard qdisc queues are mapped (indexed by skb
 * queue_mapping).
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,
	SME_AC_VI,
	SME_AC_BE,
	SME_AC_BK,
};

#endif
92
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080093#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
94/**
95 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
96 * @adapter_context: pointer to vdev adapter
97 *
98 * If Blocked OS Q is not resumed during timeout period, to prevent
99 * permanent stall, resume OS Q forcefully.
100 *
101 * Return: None
102 */
103void hdd_tx_resume_timer_expired_handler(void *adapter_context)
104{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700105 struct hdd_adapter *pAdapter = (struct hdd_adapter *) adapter_context;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800106
107 if (!pAdapter) {
108 /* INVALID ARG */
109 return;
110 }
111
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700112 hdd_debug("Enabling queues");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800113 wlan_hdd_netif_queue_control(pAdapter, WLAN_WAKE_ALL_NETIF_QUEUE,
Jeff Johnsona0399642016-12-05 12:39:59 -0800114 WLAN_CONTROL_PATH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800115}
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530116#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
117
118/**
119 * hdd_tx_resume_false() - Resume OS TX Q false leads to queue disabling
120 * @pAdapter: pointer to hdd adapter
121 * @tx_resume: TX Q resume trigger
122 *
123 *
124 * Return: None
125 */
126static void
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700127hdd_tx_resume_false(struct hdd_adapter *pAdapter, bool tx_resume)
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530128{
129 if (true == tx_resume)
130 return;
131
132 /* Pause TX */
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700133 hdd_debug("Disabling queues");
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530134 wlan_hdd_netif_queue_control(pAdapter, WLAN_STOP_ALL_NETIF_QUEUE,
135 WLAN_DATA_FLOW_CONTROL);
136
137 if (QDF_TIMER_STATE_STOPPED ==
138 qdf_mc_timer_get_current_state(&pAdapter->
139 tx_flow_control_timer)) {
140 QDF_STATUS status;
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700141
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530142 status = qdf_mc_timer_start(&pAdapter->tx_flow_control_timer,
143 WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);
144
145 if (!QDF_IS_STATUS_SUCCESS(status))
146 hdd_err("Failed to start tx_flow_control_timer");
147 else
148 pAdapter->hdd_stats.hddTxRxStats.txflow_timer_cnt++;
149 }
150
151 pAdapter->hdd_stats.hddTxRxStats.txflow_pause_cnt++;
152 pAdapter->hdd_stats.hddTxRxStats.is_txflow_paused = true;
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530153}
154#else
155
156static inline void
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700157hdd_tx_resume_false(struct hdd_adapter *pAdapter, bool tx_resume)
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530158{
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530159}
160#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800161
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700162static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *pAdapter,
gbianec670c592016-11-24 11:21:30 +0800163 struct sk_buff *skb)
164{
165 if (pAdapter->tx_flow_low_watermark > 0)
166 skb_orphan(skb);
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700167 else
gbianec670c592016-11-24 11:21:30 +0800168 skb = skb_unshare(skb, GFP_ATOMIC);
gbianec670c592016-11-24 11:21:30 +0800169
170 return skb;
171}
172
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800173/**
174 * hdd_tx_resume_cb() - Resume OS TX Q.
175 * @adapter_context: pointer to vdev apdapter
176 * @tx_resume: TX Q resume trigger
177 *
178 * Q was stopped due to WLAN TX path low resource condition
179 *
180 * Return: None
181 */
182void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
183{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700184 struct hdd_adapter *pAdapter = (struct hdd_adapter *) adapter_context;
Jeff Johnson40dae4e2017-08-29 14:00:25 -0700185 struct hdd_station_ctx *hdd_sta_ctx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800186
187 if (!pAdapter) {
188 /* INVALID ARG */
189 return;
190 }
191
192 hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
193
194 /* Resume TX */
195 if (true == tx_resume) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530196 if (QDF_TIMER_STATE_STOPPED !=
197 qdf_mc_timer_get_current_state(&pAdapter->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800198 tx_flow_control_timer)) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530199 qdf_mc_timer_stop(&pAdapter->tx_flow_control_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800200 }
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700201 hdd_debug("Enabling queues");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800202 wlan_hdd_netif_queue_control(pAdapter,
Jeff Johnsona0399642016-12-05 12:39:59 -0800203 WLAN_WAKE_ALL_NETIF_QUEUE,
204 WLAN_DATA_FLOW_CONTROL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800205 }
Poddar, Siddarthb61cf642016-04-28 16:02:39 +0530206 hdd_tx_resume_false(pAdapter, tx_resume);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800207}
208
bings284f8be2017-08-11 10:41:30 +0800209bool hdd_tx_flow_control_is_pause(void *adapter_context)
210{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700211 struct hdd_adapter *pAdapter = (struct hdd_adapter *) adapter_context;
bings284f8be2017-08-11 10:41:30 +0800212
213 if ((NULL == pAdapter) || (WLAN_HDD_ADAPTER_MAGIC != pAdapter->magic)) {
214 /* INVALID ARG */
215 hdd_err("invalid adapter %p", pAdapter);
216 return false;
217 }
218
219 return pAdapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL);
220}
221
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700222void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
Anurag Chouhan210db072016-02-22 18:42:15 +0530223 qdf_mc_timer_callback_t timer_callback,
bings284f8be2017-08-11 10:41:30 +0800224 ol_txrx_tx_flow_control_fp flow_control_fp,
225 ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause_fp)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800226{
227 if (adapter->tx_flow_timer_initialized == false) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530228 qdf_mc_timer_init(&adapter->tx_flow_control_timer,
Anurag Chouhan6d760662016-02-20 16:05:43 +0530229 QDF_TIMER_TYPE_SW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800230 timer_callback,
231 adapter);
232 adapter->tx_flow_timer_initialized = true;
233 }
Leo Changfdb45c32016-10-28 11:09:23 -0700234 cdp_fc_register(cds_get_context(QDF_MODULE_ID_SOC),
bings284f8be2017-08-11 10:41:30 +0800235 adapter->sessionId, flow_control_fp, adapter,
236 flow_control_is_pause_fp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800237}
238
239/**
240 * hdd_deregister_tx_flow_control() - Deregister TX Flow control
241 * @adapter: adapter handle
242 *
243 * Return: none
244 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700245void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800246{
Leo Changfdb45c32016-10-28 11:09:23 -0700247 cdp_fc_deregister(cds_get_context(QDF_MODULE_ID_SOC),
248 adapter->sessionId);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800249 if (adapter->tx_flow_timer_initialized == true) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530250 qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
251 qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800252 adapter->tx_flow_timer_initialized = false;
253 }
254}
255
256/**
257 * hdd_get_tx_resource() - check tx resources and take action
258 * @adapter: adapter handle
259 * @STAId: station id
260 * @timer_value: timer value
261 *
262 * Return: none
263 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700264void hdd_get_tx_resource(struct hdd_adapter *adapter,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800265 uint8_t STAId, uint16_t timer_value)
266{
267 if (false ==
Leo Changfdb45c32016-10-28 11:09:23 -0700268 cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC), STAId,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800269 adapter->tx_flow_low_watermark,
270 adapter->tx_flow_high_watermark_offset)) {
Varun Reddy Yeturu8a5d3d42017-08-02 13:03:27 -0700271 hdd_debug("Disabling queues lwm %d hwm offset %d",
Jeff Johnsona0399642016-12-05 12:39:59 -0800272 adapter->tx_flow_low_watermark,
273 adapter->tx_flow_high_watermark_offset);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800274 wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
275 WLAN_DATA_FLOW_CONTROL);
276 if ((adapter->tx_flow_timer_initialized == true) &&
Anurag Chouhan210db072016-02-22 18:42:15 +0530277 (QDF_TIMER_STATE_STOPPED ==
278 qdf_mc_timer_get_current_state(&adapter->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800279 tx_flow_control_timer))) {
Anurag Chouhan210db072016-02-22 18:42:15 +0530280 qdf_mc_timer_start(&adapter->tx_flow_control_timer,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800281 timer_value);
282 adapter->hdd_stats.hddTxRxStats.txflow_timer_cnt++;
283 adapter->hdd_stats.hddTxRxStats.txflow_pause_cnt++;
284 adapter->hdd_stats.hddTxRxStats.is_txflow_paused = true;
285 }
286 }
287}
288
#else
/**
 * hdd_skb_orphan() - skb_unshare a cloned packed else skb_orphan
 * @pAdapter: pointer to HDD adapter
 * @skb: pointer to skb data packet
 *
 * Return: pointer to skb structure
 */
static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *pAdapter,
		struct sk_buff *skb) {

	struct sk_buff *nskb;
	struct hdd_context *hdd_ctx = pAdapter->pHddCtx;

	/* Take a private copy if the skb is shared; nskb == skb means the
	 * buffer was already exclusively ours.
	 */
	nskb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(hdd_ctx->config->tx_orphan_enable) && (nskb == skb)) {
		/*
		 * For UDP packets we want to orphan the packet to allow the app
		 * to send more packets. The flow would ultimately be controlled
		 * by the limited number of tx descriptors for the vdev.
		 */
		++pAdapter->hdd_stats.hddTxRxStats.txXmitOrphaned;
		skb_orphan(skb);
	}
	return nskb;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
316
Nirav Shah5e74bb82016-07-20 16:01:27 +0530317/**
318 * qdf_event_eapol_log() - send event to wlan diag
319 * @skb: skb ptr
320 * @dir: direction
321 * @eapol_key_info: eapol key info
322 *
323 * Return: None
324 */
325void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
326{
327 int16_t eapol_key_info;
328
329 WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);
330
331 if ((dir == QDF_TX &&
332 (QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
333 QDF_NBUF_CB_GET_PACKET_TYPE(skb))))
334 return;
335 else if (!qdf_nbuf_is_ipv4_eapol_pkt(skb))
336 return;
337
338 eapol_key_info = (uint16_t)(*(uint16_t *)
339 (skb->data + EAPOL_KEY_INFO_OFFSET));
340
341 wlan_diag_event.event_sub_type =
342 (dir == QDF_TX ?
343 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
344 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
345 wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
346 (skb->data + EAPOL_PACKET_TYPE_OFFSET));
347 wlan_diag_event.eapol_key_info = eapol_key_info;
348 wlan_diag_event.eapol_rate = 0;
349 qdf_mem_copy(wlan_diag_event.dest_addr,
350 (skb->data + QDF_NBUF_DEST_MAC_OFFSET),
351 sizeof(wlan_diag_event.dest_addr));
352 qdf_mem_copy(wlan_diag_event.src_addr,
353 (skb->data + QDF_NBUF_SRC_MAC_OFFSET),
354 sizeof(wlan_diag_event.src_addr));
355
356 WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
357}
358
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800359
/**
 * wlan_hdd_classify_pkt() - classify packet
 * @skb - sk buff
 *
 * Zeroes the skb control block, then tags it with broadcast/multicast
 * flags (from the destination MAC) and with the first matching protocol
 * class: ARP, DHCP, EAPOL, WAPI or ICMP.
 *
 * Return: none
 */
void wlan_hdd_classify_pkt(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	/* start from a clean control block */
	qdf_mem_set(skb->cb, sizeof(skb->cb), 0);

	/* check destination mac address is broadcast/multicast
	 * (h_dest is the first field of struct ethhdr, so the cast of
	 * eh itself addresses the destination MAC)
	 */
	if (is_broadcast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_BCAST(skb) = true;
	else if (is_multicast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_GET_IS_MCAST(skb) = true;

	/* record the first protocol class that matches */
	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ARP;
	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_DHCP;
	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_WAPI;
	else if (qdf_nbuf_is_icmp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ICMP;

}
395
/**
 * wlan_hdd_latency_opt()- latency option
 * @adapter: pointer to the adapter structure
 * @skb: pointer to sk buff
 *
 * Function to disable power save for icmp packets.
 *
 * Return: None
 */
#ifdef WLAN_ICMP_DISABLE_PS
static inline void
wlan_hdd_latency_opt(struct hdd_adapter *adapter, struct sk_buff *skb)
{
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);

	/* feature disabled via configuration */
	if (hdd_ctx->config->icmp_disable_ps_val <= 0)
		return;

	/* only ICMP frames trigger the power-save exit */
	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) != QDF_NBUF_CB_PACKET_TYPE_ICMP)
		return;

	wlan_hdd_set_powersave(adapter, false,
			       hdd_ctx->config->icmp_disable_ps_val);
}
#else
static inline void
wlan_hdd_latency_opt(struct hdd_adapter *adapter, struct sk_buff *skb)
{
}
#endif
426
/**
 * hdd_get_transmit_sta_id() - function to retrieve station id to be used for
 * sending traffic towards a particular destination address. The destination
 * address can be unicast, multicast or broadcast
 *
 * @adapter: Handle to adapter context
 * @skb: packet whose destination MAC (start of skb->data) is looked up
 * @station_id: station id (out; left unchanged when no mapping applies,
 *              so the caller should pre-set it to an invalid id)
 *
 * Returns: None
 */
static void hdd_get_transmit_sta_id(struct hdd_adapter *adapter,
			struct sk_buff *skb, uint8_t *station_id)
{
	bool mcbc_addr = false;
	QDF_STATUS status;
	struct hdd_station_ctx *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
	struct qdf_mac_addr *dst_addr = NULL;

	/* destination MAC is at the front of the ethernet frame */
	dst_addr = (struct qdf_mac_addr *)skb->data;
	status = hdd_get_peer_sta_id(sta_ctx, dst_addr, station_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		/* no per-peer entry; note MC/BC so IBSS/NDI can use the
		 * broadcast station id below
		 */
		if (QDF_NBUF_CB_GET_IS_BCAST(skb) ||
				QDF_NBUF_CB_GET_IS_MCAST(skb)) {
			hdd_debug("Received MC/BC packet for transmission");
			mcbc_addr = true;
		}
	}

	if (adapter->device_mode == QDF_IBSS_MODE ||
	    adapter->device_mode == QDF_NDI_MODE) {
		/*
		 * This check is necessary to make sure station id is not
		 * overwritten for UC traffic in IBSS or NDI mode
		 */
		if (mcbc_addr)
			*station_id = sta_ctx->broadcast_staid;
	} else {
		/* For the rest, traffic is directed to AP/P2P GO */
		if (eConnectionState_Associated == sta_ctx->conn_info.connState)
			*station_id = sta_ctx->conn_info.staId[0];
	}
}
470
471/**
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800472 * hdd_is_tx_allowed() - check if Tx is allowed based on current peer state
473 * @skb: pointer to OS packet (sk_buff)
474 * @peer_id: Peer STA ID in peer table
475 *
476 * This function gets the peer state from DP and check if it is either
477 * in OL_TXRX_PEER_STATE_CONN or OL_TXRX_PEER_STATE_AUTH. Only EAP packets
478 * are allowed when peer_state is OL_TXRX_PEER_STATE_CONN. All packets
479 * allowed when peer_state is OL_TXRX_PEER_STATE_AUTH.
480 *
481 * Return: true if Tx is allowed and false otherwise.
482 */
483static inline bool hdd_is_tx_allowed(struct sk_buff *skb, uint8_t peer_id)
484{
485 enum ol_txrx_peer_state peer_state;
486 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
487 void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
488 void *peer;
489
490 QDF_BUG(soc);
491 QDF_BUG(pdev);
492
493 peer = cdp_peer_find_by_local_id(soc, pdev, peer_id);
494
495 if (peer == NULL) {
496 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_WARN,
497 FL("Unable to find peer entry for staid: %d"),
498 peer_id);
499 return false;
500 }
501
502 peer_state = cdp_peer_state_get(soc, peer);
Jeff Johnson68755312017-02-10 11:46:55 -0800503 if (likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800504 return true;
Jeff Johnson68755312017-02-10 11:46:55 -0800505 if (OL_TXRX_PEER_STATE_CONN == peer_state &&
506 ntohs(skb->protocol) == HDD_ETHERTYPE_802_1_X)
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800507 return true;
Jeff Johnson68755312017-02-10 11:46:55 -0800508 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_WARN,
509 FL("Invalid peer state for Tx: %d"), peer_state);
510 return false;
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800511}
512
513/**
Mukul Sharmac4de4ef2016-09-12 15:39:00 +0530514 * __hdd_hard_start_xmit() - Transmit a frame
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800515 * @skb: pointer to OS packet (sk_buff)
516 * @dev: pointer to network device
517 *
518 * Function registered with the Linux OS for transmitting
519 * packets. This version of the function directly passes
520 * the packet to Transport Layer.
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530521 * In case of any packet drop or error, log the error with
522 * INFO HIGH/LOW/MEDIUM to avoid excessive logging in kmsg.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800523 *
524 * Return: Always returns NETDEV_TX_OK
525 */
Jeff Johnson3ae708d2016-10-05 15:45:00 -0700526static int __hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800527{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530528 QDF_STATUS status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800529 sme_ac_enum_type ac;
Abhishek Singh12be60f2017-08-11 13:52:42 +0530530 enum sme_qos_wmmuptype up;
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700531 struct hdd_adapter *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800532 bool granted;
Nirav Shah5e74bb82016-07-20 16:01:27 +0530533 uint8_t STAId;
Jeff Johnson40dae4e2017-08-29 14:00:25 -0700534 struct hdd_station_ctx *pHddStaCtx = &pAdapter->sessionCtx.station;
Kabilan Kannan1c1c4022017-04-06 22:49:26 -0700535 struct qdf_mac_addr *mac_addr;
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700536#ifdef QCA_PKT_PROTO_TRACE
537 uint8_t proto_type = 0;
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -0700538 struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(pAdapter);
Kabilan Kannan32eb5022016-10-04 12:24:50 -0700539#endif /* QCA_PKT_PROTO_TRACE */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800540
541#ifdef QCA_WIFI_FTM
Anurag Chouhan6d760662016-02-20 16:05:43 +0530542 if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800543 kfree_skb(skb);
544 return NETDEV_TX_OK;
545 }
546#endif
547
548 ++pAdapter->hdd_stats.hddTxRxStats.txXmitCalled;
Hanumanth Reddy Pothula2a8a7402017-07-03 14:06:11 +0530549 if (cds_is_driver_recovering() || cds_is_driver_in_bad_state()) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530550 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Prashanth Bhatta9e143052015-12-04 11:56:47 -0800551 "Recovery in progress, dropping the packet");
Nirav Shahdf3659e2016-06-27 12:26:28 +0530552 goto drop_pkt;
Govind Singhede435f2015-12-01 16:16:36 +0530553 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800554
Nirav Shah5e74bb82016-07-20 16:01:27 +0530555 wlan_hdd_classify_pkt(skb);
Zhu Jianmin04392c42017-05-12 16:34:53 +0800556 wlan_hdd_latency_opt(pAdapter, skb);
Nirav Shah5e74bb82016-07-20 16:01:27 +0530557
Ravi Joshi24477b72016-07-19 15:45:09 -0700558 STAId = HDD_WLAN_INVALID_STA_ID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800559
Nirav Shah5e74bb82016-07-20 16:01:27 +0530560 hdd_get_transmit_sta_id(pAdapter, skb, &STAId);
Naveen Rawat209d0932016-08-03 15:07:23 -0700561 if (STAId >= WLAN_MAX_STA_COUNT) {
SaidiReddy Yenuga28dc3272017-03-21 14:37:40 +0530562 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO,
Jeff Johnsona0399642016-12-05 12:39:59 -0800563 "Invalid station id, transmit operation suspended");
Ravi Joshi24477b72016-07-19 15:45:09 -0700564 goto drop_pkt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800565 }
566
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800567 hdd_get_tx_resource(pAdapter, STAId,
568 WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);
569
570 /* Get TL AC corresponding to Qdisc queue index/AC. */
571 ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];
572
Nirav Shahcbc6d722016-03-01 16:24:53 +0530573 if (!qdf_nbuf_ipa_owned_get(skb)) {
gbianec670c592016-11-24 11:21:30 +0800574#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
575 /*
Jeff Johnson34759db2017-01-12 08:41:07 -0800576 * The TCP TX throttling logic is changed a little after
577 * 3.19-rc1 kernel, the TCP sending limit will be smaller,
578 * which will throttle the TCP packets to the host driver.
579 * The TCP UP LINK throughput will drop heavily. In order to
580 * fix this issue, need to orphan the socket buffer asap, which
581 * will call skb's destructor to notify the TCP stack that the
582 * SKB buffer is unowned. And then the TCP stack will pump more
583 * packets to host driver.
584 *
585 * The TX packets might be dropped for UDP case in the iperf
586 * testing. So need to be protected by follow control.
587 */
gbianec670c592016-11-24 11:21:30 +0800588 skb = hdd_skb_orphan(pAdapter, skb);
589#else
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800590 /* Check if the buffer has enough header room */
591 skb = skb_unshare(skb, GFP_ATOMIC);
gbianec670c592016-11-24 11:21:30 +0800592#endif
593
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800594 if (!skb)
Nirav Shahdf3659e2016-06-27 12:26:28 +0530595 goto drop_pkt_accounting;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800596 }
597
Ravi Joshi24477b72016-07-19 15:45:09 -0700598 /*
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530599 * Add SKB to internal tracking table before further processing
600 * in WLAN driver.
601 */
602 qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__);
603
604 /*
Ravi Joshi24477b72016-07-19 15:45:09 -0700605 * user priority from IP header, which is already extracted and set from
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800606 * select_queue call back function
607 */
608 up = skb->priority;
609
610 ++pAdapter->hdd_stats.hddTxRxStats.txXmitClassifiedAC[ac];
611#ifdef HDD_WMM_DEBUG
Srinivas Girigowda028c4482017-03-09 18:52:02 -0800612 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800613 "%s: Classified as ac %d up %d", __func__, ac, up);
614#endif /* HDD_WMM_DEBUG */
615
616 if (HDD_PSB_CHANGED == pAdapter->psbChanged) {
Ravi Joshi24477b72016-07-19 15:45:09 -0700617 /*
618 * Function which will determine acquire admittance for a
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800619 * WMM AC is required or not based on psb configuration done
620 * in the framework
621 */
622 hdd_wmm_acquire_access_required(pAdapter, ac);
623 }
624 /*
625 * Make sure we already have access to this access category
626 * or it is EAPOL or WAPI frame during initial authentication which
627 * can have artifically boosted higher qos priority.
628 */
629
630 if (((pAdapter->psbChanged & (1 << ac)) &&
631 likely(pAdapter->hddWmmStatus.wmmAcStatus[ac].
632 wmmAcAccessAllowed)) ||
633 ((pHddStaCtx->conn_info.uIsAuthenticated == false) &&
Nirav Shah5e74bb82016-07-20 16:01:27 +0530634 (QDF_NBUF_CB_PACKET_TYPE_EAPOL ==
635 QDF_NBUF_CB_GET_PACKET_TYPE(skb) ||
636 QDF_NBUF_CB_PACKET_TYPE_WAPI ==
637 QDF_NBUF_CB_GET_PACKET_TYPE(skb)))) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800638 granted = true;
639 } else {
640 status = hdd_wmm_acquire_access(pAdapter, ac, &granted);
641 pAdapter->psbChanged |= (1 << ac);
642 }
643
644 if (!granted) {
645 bool isDefaultAc = false;
Ravi Joshi24477b72016-07-19 15:45:09 -0700646 /*
647 * ADDTS request for this AC is sent, for now
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800648 * send this packet through next avaiable lower
649 * Access category until ADDTS negotiation completes.
650 */
651 while (!likely
652 (pAdapter->hddWmmStatus.wmmAcStatus[ac].
653 wmmAcAccessAllowed)) {
654 switch (ac) {
655 case SME_AC_VO:
656 ac = SME_AC_VI;
657 up = SME_QOS_WMM_UP_VI;
658 break;
659 case SME_AC_VI:
660 ac = SME_AC_BE;
661 up = SME_QOS_WMM_UP_BE;
662 break;
663 case SME_AC_BE:
664 ac = SME_AC_BK;
665 up = SME_QOS_WMM_UP_BK;
666 break;
667 default:
668 ac = SME_AC_BK;
669 up = SME_QOS_WMM_UP_BK;
670 isDefaultAc = true;
671 break;
672 }
673 if (isDefaultAc)
674 break;
675 }
676 skb->priority = up;
677 skb->queue_mapping = hdd_linux_up_to_ac_map[up];
678 }
679
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700680#ifdef QCA_PKT_PROTO_TRACE
681 if ((hdd_ctx->config->gEnableDebugLog & CDS_PKT_TRAC_TYPE_EAPOL) ||
682 (hdd_ctx->config->gEnableDebugLog & CDS_PKT_TRAC_TYPE_DHCP)) {
683 proto_type = cds_pkt_get_proto_type(skb,
684 hdd_ctx->config->gEnableDebugLog,
685 0);
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700686 if (CDS_PKT_TRAC_TYPE_EAPOL & proto_type)
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700687 cds_pkt_trace_buf_update("ST:T:EPL");
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700688 else if (CDS_PKT_TRAC_TYPE_DHCP & proto_type)
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700689 cds_pkt_trace_buf_update("ST:T:DHC");
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700690 }
691#endif /* QCA_PKT_PROTO_TRACE */
692
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800693 pAdapter->stats.tx_bytes += skb->len;
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700694
Kabilan Kannan1c1c4022017-04-06 22:49:26 -0700695 mac_addr = (struct qdf_mac_addr *)skb->data;
696
697 ucfg_tdls_update_tx_pkt_cnt(pAdapter->hdd_vdev, mac_addr);
698
Kabilan Kannan32eb5022016-10-04 12:24:50 -0700699 wlan_hdd_tdls_update_tx_pkt_cnt(pAdapter, skb);
Kabilan Kannan36090ce2016-05-03 19:28:44 -0700700
Mohit Khannab1dd1e82017-02-04 15:14:38 -0800701 if (qdf_nbuf_is_tso(skb))
702 pAdapter->stats.tx_packets += qdf_nbuf_get_tso_num_seg(skb);
Srinivas Girigowdae3ae2572017-03-25 14:14:22 -0700703 else
Mohit Khannab1dd1e82017-02-04 15:14:38 -0800704 ++pAdapter->stats.tx_packets;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800705
Nirav Shah5e74bb82016-07-20 16:01:27 +0530706 hdd_event_eapol_log(skb, QDF_TX);
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700707 qdf_dp_trace_log_pkt(pAdapter->sessionId, skb, QDF_TX,
708 QDF_TRACE_DEFAULT_PDEV_ID);
Nirav Shahcbc6d722016-03-01 16:24:53 +0530709 QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
710 QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800711
Nirav Shah0d58a7e2016-04-26 22:54:12 +0530712 qdf_dp_trace_set_track(skb, QDF_TX);
713 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700714 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(skb),
715 sizeof(qdf_nbuf_data(skb)),
Himanshu Agarwalee3411a2017-01-31 12:56:47 +0530716 QDF_TX));
Nirav Shah0d58a7e2016-04-26 22:54:12 +0530717 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700718 QDF_TRACE_DEFAULT_PDEV_ID, (uint8_t *)skb->data,
719 qdf_nbuf_len(skb), QDF_TX));
Nirav Shah07e39a62016-04-25 17:46:40 +0530720 if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE) {
Nirav Shah0d58a7e2016-04-26 22:54:12 +0530721 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700722 QDF_TRACE_DEFAULT_PDEV_ID,
Nirav Shah0d58a7e2016-04-26 22:54:12 +0530723 (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
724 (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE), QDF_TX));
Nirav Shah07e39a62016-04-25 17:46:40 +0530725 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800726
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800727 if (!hdd_is_tx_allowed(skb, STAId)) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530728 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Krishna Kumaar Natarajan5554cec2017-01-12 19:38:55 -0800729 FL("Tx not allowed for sta_id: %d"), STAId);
Nirav Shahdf3659e2016-06-27 12:26:28 +0530730 ++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530731 goto drop_pkt_and_release_skb;
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800732 }
733
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800734 /*
Ravi Joshi24477b72016-07-19 15:45:09 -0700735 * If a transmit function is not registered, drop packet
736 */
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800737 if (!pAdapter->tx_fn) {
738 QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
739 "%s: TX function not registered by the data path",
740 __func__);
Nirav Shahdf3659e2016-06-27 12:26:28 +0530741 ++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530742 goto drop_pkt_and_release_skb;
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800743 }
744
Leo Changfdb45c32016-10-28 11:09:23 -0700745 if (pAdapter->tx_fn(pAdapter->txrx_vdev,
Dhanashri Atre168d2b42016-02-22 14:43:06 -0800746 (qdf_nbuf_t) skb) != NULL) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530747 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
Srinivas Girigowda028c4482017-03-09 18:52:02 -0800748 "%s: Failed to send packet to txrx for staid: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800749 __func__, STAId);
Nirav Shahdf3659e2016-06-27 12:26:28 +0530750 ++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac];
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530751 goto drop_pkt_and_release_skb;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800752 }
Dustin Browne0024fa2016-10-14 16:29:21 -0700753 netif_trans_update(dev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800754
755 return NETDEV_TX_OK;
756
Himanshu Agarwal53298d12017-02-20 19:14:17 +0530757drop_pkt_and_release_skb:
758 qdf_net_buf_debug_release_skb(skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800759drop_pkt:
760
Nirav Shahdf3659e2016-06-27 12:26:28 +0530761 if (skb) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530762 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700763 QDF_TRACE_DEFAULT_PDEV_ID, (uint8_t *)skb->data,
764 qdf_nbuf_len(skb), QDF_TX));
Nirav Shahdf3659e2016-06-27 12:26:28 +0530765 if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
766 DPTRACE(qdf_dp_trace(skb,
767 QDF_DP_TRACE_DROP_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -0700768 QDF_TRACE_DEFAULT_PDEV_ID,
Nirav Shahdf3659e2016-06-27 12:26:28 +0530769 (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
770 (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
771 QDF_TX));
772
773 kfree_skb(skb);
774 }
775
776drop_pkt_accounting:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800777
778 ++pAdapter->stats.tx_dropped;
779 ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped;
Nirav Shahdf3659e2016-06-27 12:26:28 +0530780
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800781 return NETDEV_TX_OK;
782}
783
/**
 * hdd_hard_start_xmit() - SSR-protected wrapper around
 *	__hdd_hard_start_xmit
 * @skb: pointer to OS packet
 * @dev: pointer to net_device structure
 *
 * Invoked by the network stack whenever a packet needs to be
 * transmitted; serializes against SSR (subsystem restart) while the
 * real transmit handler runs.
 *
 * Return: Always returns NETDEV_TX_OK
 */
int hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int status;

	cds_ssr_protect(__func__);
	status = __hdd_hard_start_xmit(skb, dev);
	cds_ssr_unprotect(__func__);

	return status;
}
804
805/**
Deepak Dhamdhere5872c8c2016-06-02 15:51:47 -0700806 * hdd_get_peer_sta_id() - Get the StationID using the Peer Mac address
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800807 * @pHddStaCtx: pointer to HDD Station Context
808 * @pMacAddress: pointer to Peer Mac address
809 * @staID: pointer to returned Station Index
810 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530811 * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_FAILURE
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800812 */
813
Jeff Johnson40dae4e2017-08-29 14:00:25 -0700814QDF_STATUS hdd_get_peer_sta_id(struct hdd_station_ctx *pHddStaCtx,
Anurag Chouhan6d760662016-02-20 16:05:43 +0530815 struct qdf_mac_addr *pMacAddress, uint8_t *staId)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800816{
817 uint8_t idx;
818
Naveen Rawatc45d1622016-07-05 12:20:09 -0700819 for (idx = 0; idx < MAX_PEERS; idx++) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530820 if (!qdf_mem_cmp(&pHddStaCtx->conn_info.peerMacAddress[idx],
Anurag Chouhan6d760662016-02-20 16:05:43 +0530821 pMacAddress, QDF_MAC_ADDR_SIZE)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800822 *staId = pHddStaCtx->conn_info.staId[idx];
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530823 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800824 }
825 }
826
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530827 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800828}
829
#ifdef FEATURE_WLAN_DIAG_SUPPORT
/**
 * hdd_wlan_datastall_sta_event() - send sta datastall information
 *
 * This function sends a station-mode data stall status diag event with
 * reason STA_TX_TIMEOUT so the stall shows up in host diag logs.
 *
 * Return: void.
 */
static void hdd_wlan_datastall_sta_event(void)
{
	/* WLAN_HOST_DIAG_EVENT_DEF declares the local event payload */
	WLAN_HOST_DIAG_EVENT_DEF(sta_data_stall,
				struct host_event_wlan_datastall);
	qdf_mem_zero(&sta_data_stall, sizeof(sta_data_stall));
	sta_data_stall.reason = STA_TX_TIMEOUT;
	WLAN_HOST_DIAG_EVENT_REPORT(&sta_data_stall, EVENT_WLAN_STA_DATASTALL);
}
#else
/* no-op stub when diag support is compiled out */
static inline void hdd_wlan_datastall_sta_event(void)
{
}
#endif
851
/**
 * __hdd_tx_timeout() - TX timeout handler
 * @dev: pointer to network device
 *
 * This function is registered as a netdev ndo_tx_timeout method, and
 * is invoked by the kernel if the driver takes too long to transmit a
 * frame. It dumps per-queue state, the netif queue pause history and
 * the flow pool state, then reports a data stall diag event.
 *
 * Return: None
 */
static void __hdd_tx_timeout(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	struct hdd_context *hdd_ctx;
	struct netdev_queue *txq;
	int i = 0;

	TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
	DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_HDD_TX_TIMEOUT,
				QDF_TRACE_DEFAULT_PDEV_ID,
				NULL, 0, QDF_TX));

	/* Getting here implies we disabled the TX queues for too
	 * long. Queues are disabled either because of disassociation
	 * or low resource scenarios. In case of disassociation it is
	 * ok to ignore this. But if associated, we have do possible
	 * recovery here
	 */

	/* log the stopped/running state of every TX queue to aid triage */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		txq = netdev_get_tx_queue(dev, i);
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
			  "Queue: %d status: %d txq->trans_start: %lu",
			  i, netif_tx_queue_stopped(txq), txq->trans_start);
	}

	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
		  "carrier state: %d", netif_carrier_ok(dev));
	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	/* dump recent queue pause/unpause history and DP flow pools */
	wlan_hdd_display_netif_queue_history(hdd_ctx);
	cdp_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));
	/* report the stall to the host diag infrastructure */
	hdd_wlan_datastall_sta_event();
}
895
/**
 * hdd_tx_timeout() - Wrapper function to protect __hdd_tx_timeout from SSR
 * @dev: pointer to net_device structure
 *
 * Called by the OS if there is any timeout during transmission.
 * Since HDD simply enqueues the packet and returns control to the OS
 * right away, this would never be invoked under normal conditions.
 *
 * Return: none
 */
void hdd_tx_timeout(struct net_device *dev)
{
	cds_ssr_protect(__func__);
	__hdd_tx_timeout(dev);
	cds_ssr_unprotect(__func__);
}
912
913/**
914 * @hdd_init_tx_rx() - Initialize Tx/RX module
915 * @pAdapter: pointer to adapter context
916 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530917 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
918 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800919 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700920QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *pAdapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800921{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530922 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800923
924 if (NULL == pAdapter) {
Jeff Johnsona0399642016-12-05 12:39:59 -0800925 hdd_err("pAdapter is NULL");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530926 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530927 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800928 }
929
930 return status;
931}
932
933/**
934 * @hdd_deinit_tx_rx() - Deinitialize Tx/RX module
935 * @pAdapter: pointer to adapter context
936 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530937 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
938 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800939 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -0700940QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *pAdapter)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800941{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530942 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800943
944 if (NULL == pAdapter) {
Jeff Johnsona0399642016-12-05 12:39:59 -0800945 hdd_err("pAdapter is NULL");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530946 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530947 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800948 }
949
950 return status;
951}
952
/**
 * hdd_mon_rx_packet_cbk() - Receive callback registered with OL layer.
 * @context: [in] pointer to qdf context (the monitor-mode hdd_adapter)
 * @rxbuf: [in] pointer to rx qdf_nbuf (may be a chain linked via skb->next)
 *
 * TL will call this to notify the HDD when one or more packets were
 * received for a registered STA. Each skb in the chain is accounted,
 * released from the nbuf debug tracking table, and handed to the
 * network stack.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered, QDF_STATUS_SUCCESS
 * otherwise
 */
static QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
{
	struct hdd_adapter *adapter;
	int rxstat;
	struct sk_buff *skb;
	struct sk_buff *skb_next;
	unsigned int cpu_index;

	/* Sanity check on inputs */
	if ((NULL == context) || (NULL == rxbuf)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	adapter = (struct hdd_adapter *)context;
	/* magic cookie guards against a stale or corrupted adapter pointer */
	if ((NULL == adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "invalid adapter %p", adapter);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = wlan_hdd_get_cpu();

	/* walk the chain until all are processed */
	skb = (struct sk_buff *) rxbuf;
	while (NULL != skb) {
		skb_next = skb->next;
		skb->dev = adapter->dev;

		++adapter->hdd_stats.hddTxRxStats.rxPackets[cpu_index];
		++adapter->stats.rx_packets;
		adapter->stats.rx_bytes += skb->len;

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(skb);

		/*
		 * If this is not a last packet on the chain
		 * Just put packet into backlog queue, not scheduling RX sirq
		 *
		 * NOTE(review): skb->next is left non-NULL when handed to
		 * netif_rx() here — verify the stack tolerates chained skbs
		 * on this path.
		 */
		if (skb->next) {
			rxstat = netif_rx(skb);
		} else {
			/*
			 * This is the last packet on the chain
			 * Scheduling rx sirq
			 */
			rxstat = netif_rx_ni(skb);
		}

		if (NET_RX_SUCCESS == rxstat)
			++adapter->
				hdd_stats.hddTxRxStats.rxDelivered[cpu_index];
		else
			++adapter->hdd_stats.hddTxRxStats.rxRefused[cpu_index];

		skb = skb_next;
	}

	return QDF_STATUS_SUCCESS;
}
1028
1029/**
Naveen Rawatf28315c2016-06-29 18:06:02 -07001030 * hdd_get_peer_idx() - Get the idx for given address in peer table
1031 * @sta_ctx: pointer to HDD Station Context
1032 * @addr: pointer to Peer Mac address
1033 *
1034 * Return: index when success else INVALID_PEER_IDX
1035 */
Jeff Johnson40dae4e2017-08-29 14:00:25 -07001036int hdd_get_peer_idx(struct hdd_station_ctx *sta_ctx, struct qdf_mac_addr *addr)
Naveen Rawatf28315c2016-06-29 18:06:02 -07001037{
1038 uint8_t idx;
1039
Naveen Rawatc45d1622016-07-05 12:20:09 -07001040 for (idx = 0; idx < MAX_PEERS; idx++) {
Naveen Rawatac027cb2017-04-27 15:02:42 -07001041 if (sta_ctx->conn_info.staId[idx] == HDD_WLAN_INVALID_STA_ID)
Naveen Rawatf28315c2016-06-29 18:06:02 -07001042 continue;
1043 if (qdf_mem_cmp(&sta_ctx->conn_info.peerMacAddress[idx],
1044 addr, sizeof(struct qdf_mac_addr)))
1045 continue;
1046 return idx;
1047 }
1048
1049 return INVALID_PEER_IDX;
1050}
1051
Ravi Joshibb8d4512016-08-22 10:14:52 -07001052/*
1053 * hdd_is_mcast_replay() - checks if pkt is multicast replay
1054 * @skb: packet skb
1055 *
1056 * Return: true if replayed multicast pkt, false otherwise
1057 */
1058static bool hdd_is_mcast_replay(struct sk_buff *skb)
1059{
1060 struct ethhdr *eth;
1061
1062 eth = eth_hdr(skb);
1063 if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
1064 if (unlikely(ether_addr_equal(eth->h_source,
1065 skb->dev->dev_addr)))
1066 return true;
1067 }
1068 return false;
1069}
1070
/**
 * hdd_is_arp_local() - check if local or non local arp
 * @skb: pointer to sk_buff
 *
 * Checks whether @skb is an ARP request whose target IP equals the
 * IPv4 address configured on the receiving net device.
 *
 * Return: true if local arp or false otherwise.
 */
static bool hdd_is_arp_local(struct sk_buff *skb)
{
	struct arphdr *arp;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	struct in_device *in_dev;
	unsigned char *arp_ptr;
	__be32 tip;

	/* assumes the Ethernet header has already been pulled so that
	 * skb->data points at the ARP header — TODO confirm with caller
	 */
	arp = (struct arphdr *)skb->data;
	if (arp->ar_op == htons(ARPOP_REQUEST)) {
		in_dev = __in_dev_get_rtnl(skb->dev);
		if (in_dev) {
			/* find the ifaddr whose label matches this device */
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
				ifap = &ifa->ifa_next) {
				if (!strcmp(skb->dev->name, ifa->ifa_label))
					break;
			}
		}

		if (ifa && ifa->ifa_local) {
			/* skip sender HW addr, sender IP (4 bytes) and
			 * target HW addr to reach the target IP field
			 */
			arp_ptr = (unsigned char *)(arp + 1);
			arp_ptr += (skb->dev->addr_len + 4 +
					skb->dev->addr_len);
			memcpy(&tip, arp_ptr, 4);
			hdd_info("ARP packet: local IP: %x dest IP: %x",
				ifa->ifa_local, tip);
			if (ifa->ifa_local == tip)
				return true;
		}
	}

	return false;
}
1111
1112/**
Rajeev Kumaref3a3362017-05-07 20:11:16 -07001113 * hdd_is_rx_wake_lock_needed() - check if wake lock is needed
1114 * @skb: pointer to sk_buff
1115 *
1116 * RX wake lock is needed for:
1117 * 1) Unicast data packet OR
1118 * 2) Local ARP data packet
1119 *
1120 * Return: true if wake lock is needed or false otherwise.
1121 */
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301122static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb)
1123{
1124 if ((skb->pkt_type != PACKET_BROADCAST &&
1125 skb->pkt_type != PACKET_MULTICAST) || hdd_is_arp_local(skb))
1126 return true;
1127
1128 return false;
1129}
1130
1131/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001132 * hdd_rx_packet_cbk() - Receive packet handler
Dhanashri Atre182b0272016-02-17 15:35:07 -08001133 * @context: pointer to HDD context
Nirav Shahcbc6d722016-03-01 16:24:53 +05301134 * @rxBuf: pointer to rx qdf_nbuf
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001135 *
1136 * Receive callback registered with TL. TL will call this to notify
1137 * the HDD when one or more packets were received for a registered
1138 * STA.
1139 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301140 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
1141 * QDF_STATUS_SUCCESS otherwise
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001142 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08001143QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001144{
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001145 struct hdd_adapter *pAdapter = NULL;
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07001146 struct hdd_context *pHddCtx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001147 int rxstat;
1148 struct sk_buff *skb = NULL;
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001149 struct sk_buff *next = NULL;
Jeff Johnson40dae4e2017-08-29 14:00:25 -07001150 struct hdd_station_ctx *pHddStaCtx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001151 unsigned int cpu_index;
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07001152 struct qdf_mac_addr *mac_addr;
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301153 bool wake_lock = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001154
1155 /* Sanity check on inputs */
Dhanashri Atre182b0272016-02-17 15:35:07 -08001156 if (unlikely((NULL == context) || (NULL == rxBuf))) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301157 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001158 "%s: Null params being passed", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301159 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001160 }
1161
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001162 pAdapter = (struct hdd_adapter *)context;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001163 if (unlikely(WLAN_HDD_ADAPTER_MAGIC != pAdapter->magic)) {
Srinivas Girigowda028c4482017-03-09 18:52:02 -08001164 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001165 "Magic cookie(%x) for adapter sanity verification is invalid",
1166 pAdapter->magic);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301167 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001168 }
1169
Dhanashri Atre182b0272016-02-17 15:35:07 -08001170 pHddCtx = pAdapter->pHddCtx;
1171 if (unlikely(NULL == pHddCtx)) {
1172 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
1173 "%s: HDD context is Null", __func__);
1174 return QDF_STATUS_E_FAILURE;
1175 }
1176
1177 cpu_index = wlan_hdd_get_cpu();
1178
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001179 next = (struct sk_buff *)rxBuf;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001180
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001181 while (next) {
1182 skb = next;
1183 next = skb->next;
Dhanashri Atre63d98022017-01-24 18:22:09 -08001184 skb->next = NULL;
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001185
psimha884025c2017-08-01 15:07:32 -07001186#ifdef QCA_WIFI_QCA6290 /* Debug code, remove later */
Venkata Sharath Chandra Manchalacc789172017-07-25 23:28:45 -07001187 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO,
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001188 "%s: skb %p skb->len %d\n", __func__, skb, skb->len);
1189#endif
1190
Dhanashri Atre63d98022017-01-24 18:22:09 -08001191 pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
1192 if ((pHddStaCtx->conn_info.proxyARPService) &&
1193 cfg80211_is_gratuitous_arp_unsolicited_na(skb)) {
Poddar, Siddarth37a17d32017-08-09 19:04:39 +05301194 uint32_t rx_dropped;
1195
1196 rx_dropped = ++pAdapter->hdd_stats.hddTxRxStats.
1197 rxDropped[cpu_index];
1198 /* rate limit error messages to 1/8th */
1199 if ((rx_dropped & 0x07) == 0)
1200 QDF_TRACE(QDF_MODULE_ID_HDD_DATA,
1201 QDF_TRACE_LEVEL_INFO,
1202 "%s: Dropping HS 2.0 Gratuitous ARP or Unsolicited NA count=%u",
1203 __func__, rx_dropped);
Dhanashri Atre63d98022017-01-24 18:22:09 -08001204 /* Remove SKB from internal tracking table before submitting
1205 * it to stack
1206 */
1207 qdf_nbuf_free(skb);
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001208 continue;
Dhanashri Atre63d98022017-01-24 18:22:09 -08001209 }
1210
1211 hdd_event_eapol_log(skb, QDF_RX);
1212 DPTRACE(qdf_dp_trace(skb,
1213 QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001214 QDF_TRACE_DEFAULT_PDEV_ID,
Dhanashri Atre63d98022017-01-24 18:22:09 -08001215 qdf_nbuf_data_addr(skb),
1216 sizeof(qdf_nbuf_data(skb)), QDF_RX));
Himanshu Agarwalee3411a2017-01-31 12:56:47 +05301217 DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001218 QDF_TRACE_DEFAULT_PDEV_ID,
Himanshu Agarwalee3411a2017-01-31 12:56:47 +05301219 (uint8_t *)skb->data, qdf_nbuf_len(skb), QDF_RX));
1220 if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
1221 DPTRACE(qdf_dp_trace(skb,
1222 QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07001223 QDF_TRACE_DEFAULT_PDEV_ID,
Himanshu Agarwalee3411a2017-01-31 12:56:47 +05301224 (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
1225 (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
1226 QDF_RX));
Dhanashri Atre63d98022017-01-24 18:22:09 -08001227
Kabilan Kannan1c1c4022017-04-06 22:49:26 -07001228 mac_addr = (struct qdf_mac_addr *)(skb->data+QDF_MAC_ADDR_SIZE);
1229
1230 ucfg_tdls_update_rx_pkt_cnt(pAdapter->hdd_vdev, mac_addr);
1231
Dhanashri Atre63d98022017-01-24 18:22:09 -08001232 wlan_hdd_tdls_update_rx_pkt_cnt(pAdapter, skb);
1233
1234 skb->dev = pAdapter->dev;
1235 skb->protocol = eth_type_trans(skb, skb->dev);
1236 ++pAdapter->hdd_stats.hddTxRxStats.rxPackets[cpu_index];
1237 ++pAdapter->stats.rx_packets;
1238 pAdapter->stats.rx_bytes += skb->len;
1239
1240 /* Check & drop replayed mcast packets (for IPV6) */
1241 if (pHddCtx->config->multicast_replay_filter &&
1242 hdd_is_mcast_replay(skb)) {
1243 ++pAdapter->hdd_stats.hddTxRxStats.rxDropped[cpu_index];
Srinivas Girigowda028c4482017-03-09 18:52:02 -08001244 QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
Dhanashri Atre63d98022017-01-24 18:22:09 -08001245 "%s: Dropping multicast replay pkt", __func__);
1246 qdf_nbuf_free(skb);
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001247 continue;
Dhanashri Atre63d98022017-01-24 18:22:09 -08001248 }
1249
1250 /* hold configurable wakelock for unicast traffic */
Poddar, Siddarth05febac2017-08-07 12:54:41 +05301251 if (pHddCtx->config->rx_wakelock_timeout &&
1252 pHddStaCtx->conn_info.uIsAuthenticated)
Sravan Kumar Kairam2fc47552017-01-04 15:27:56 +05301253 wake_lock = hdd_is_rx_wake_lock_needed(skb);
1254
1255 if (wake_lock) {
Dhanashri Atre63d98022017-01-24 18:22:09 -08001256 cds_host_diag_log_work(&pHddCtx->rx_wake_lock,
1257 pHddCtx->config->rx_wakelock_timeout,
1258 WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
1259 qdf_wake_lock_timeout_acquire(&pHddCtx->rx_wake_lock,
1260 pHddCtx->config->
1261 rx_wakelock_timeout);
1262 }
1263
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001264 /* Remove SKB from internal tracking table before submitting
1265 * it to stack
1266 */
Dhanashri Atre63d98022017-01-24 18:22:09 -08001267 qdf_net_buf_debug_release_skb(skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001268
Yu Wangceb357b2017-06-01 12:04:18 +08001269 hdd_rx_timestamp(skb, ktime_to_us(skb->tstamp));
Dhanashri Atre63d98022017-01-24 18:22:09 -08001270 if (HDD_LRO_NO_RX ==
1271 hdd_lro_rx(pHddCtx, pAdapter, skb)) {
1272 if (hdd_napi_enabled(HDD_NAPI_ANY) &&
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05301273 !pHddCtx->enableRxThread &&
1274 !QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
Dhanashri Atre63d98022017-01-24 18:22:09 -08001275 rxstat = netif_receive_skb(skb);
1276 else
1277 rxstat = netif_rx_ni(skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001278
Dhanashri Atre63d98022017-01-24 18:22:09 -08001279 if (NET_RX_SUCCESS == rxstat)
1280 ++pAdapter->hdd_stats.hddTxRxStats.
1281 rxDelivered[cpu_index];
1282 else
1283 ++pAdapter->hdd_stats.hddTxRxStats.
1284 rxRefused[cpu_index];
Dhanashri Atre63d98022017-01-24 18:22:09 -08001285 } else {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001286 ++pAdapter->hdd_stats.hddTxRxStats.
1287 rxDelivered[cpu_index];
Dhanashri Atre63d98022017-01-24 18:22:09 -08001288 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001289 }
Dhanashri Atrecefa8802017-02-02 16:17:14 -08001290
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301291 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001292}
1293
/**
 * hdd_reason_type_to_string() - return string conversion of reason type
 * @reason: reason type
 *
 * This utility function helps log string conversion of reason type.
 *
 * Return: string conversion of the reason type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_reason_type_to_string(enum netif_reason_type reason)
{
	/* CASE_RETURN_STRING expands to: case <x>: return #x; */
	switch (reason) {
	CASE_RETURN_STRING(WLAN_CONTROL_PATH);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL);
	CASE_RETURN_STRING(WLAN_FW_PAUSE);
	CASE_RETURN_STRING(WLAN_TX_ABORT);
	CASE_RETURN_STRING(WLAN_VDEV_STOP);
	CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
	CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
	default:
		return "Invalid";
	}
}
1317
/**
 * hdd_action_type_to_string() - return string conversion of action type
 * @action: action type
 *
 * This utility function helps log string conversion of action_type.
 *
 * Return: string conversion of the action type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_action_type_to_string(enum netif_action_type action)
{

	/* CASE_RETURN_STRING expands to: case <x>: return #x; */
	switch (action) {
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
	default:
		return "Invalid";
	}
}
1342
1343/**
1344 * wlan_hdd_update_queue_oper_stats - update queue operation statistics
1345 * @adapter: adapter handle
1346 * @action: action type
1347 * @reason: reason type
1348 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001349static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001350 enum netif_action_type action, enum netif_reason_type reason)
1351{
1352 switch (action) {
1353 case WLAN_STOP_ALL_NETIF_QUEUE:
1354 case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001355 adapter->queue_oper_stats[reason].pause_count++;
1356 break;
1357 case WLAN_START_ALL_NETIF_QUEUE:
1358 case WLAN_WAKE_ALL_NETIF_QUEUE:
1359 case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
1360 adapter->queue_oper_stats[reason].unpause_count++;
1361 break;
1362 default:
1363 break;
1364 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001365}
1366
/**
 * hdd_netdev_queue_is_locked()
 * @txq: net device tx queue
 *
 * For SMP system, always return false and we could safely rely on
 * __netif_tx_trylock().
 *
 * Return: true locked; false not locked
 */
#ifdef QCA_CONFIG_SMP
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	/* SMP: __netif_tx_trylock() alone suffices; never report locked */
	return false;
}
#else
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	/* UP: xmit_lock_owner is -1 when no CPU holds the xmit lock */
	return txq->xmit_lock_owner != -1;
}
#endif
1387
/**
 * wlan_hdd_update_txq_timestamp() - update txq timestamp
 * @dev: net device
 *
 * Refreshes trans_start on every TX queue of @dev so the kernel's TX
 * watchdog does not treat a deliberately paused queue as hung.
 *
 * Return: none
 */
static void wlan_hdd_update_txq_timestamp(struct net_device *dev)
{
	struct netdev_queue *txq;
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		txq = netdev_get_tx_queue(dev, i);

		/*
		 * On UP system, kernel will trigger watchdog bite if spinlock
		 * recursion is detected. Unfortunately recursion is possible
		 * when it is called in dev_queue_xmit() context, where stack
		 * grabs the lock before calling driver's ndo_start_xmit
		 * callback.
		 */
		if (!hdd_netdev_queue_is_locked(txq)) {
			if (__netif_tx_trylock(txq)) {
				txq_trans_update(txq);
				__netif_tx_unlock(txq);
			}
		}
	}
}
1417
1418/**
Nirav Shah617cff92016-04-25 10:24:24 +05301419 * wlan_hdd_update_unpause_time() - update unpause time
1420 * @adapter: adapter handle
1421 *
1422 * Return: none
1423 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001424static void wlan_hdd_update_unpause_time(struct hdd_adapter *adapter)
Nirav Shah617cff92016-04-25 10:24:24 +05301425{
1426 qdf_time_t curr_time = qdf_system_ticks();
1427
1428 adapter->total_unpause_time += curr_time - adapter->last_time;
1429 adapter->last_time = curr_time;
1430}
1431
1432/**
1433 * wlan_hdd_update_pause_time() - update pause time
1434 * @adapter: adapter handle
1435 *
1436 * Return: none
1437 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001438static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
Nirav Shahda008342016-05-17 18:50:40 +05301439 uint32_t temp_map)
Nirav Shah617cff92016-04-25 10:24:24 +05301440{
1441 qdf_time_t curr_time = qdf_system_ticks();
Nirav Shahda008342016-05-17 18:50:40 +05301442 uint8_t i;
1443 qdf_time_t pause_time;
Nirav Shah617cff92016-04-25 10:24:24 +05301444
Nirav Shahda008342016-05-17 18:50:40 +05301445 pause_time = curr_time - adapter->last_time;
1446 adapter->total_pause_time += pause_time;
Nirav Shah617cff92016-04-25 10:24:24 +05301447 adapter->last_time = curr_time;
Nirav Shahda008342016-05-17 18:50:40 +05301448
1449 for (i = 0; i < WLAN_REASON_TYPE_MAX; i++) {
1450 if (temp_map & (1 << i)) {
1451 adapter->queue_oper_stats[i].total_pause_time +=
1452 pause_time;
1453 break;
1454 }
1455 }
1456
Nirav Shah617cff92016-04-25 10:24:24 +05301457}
1458
/**
 * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * This is single function which is used for netif_queue related
 * actions like start/stop of network queues and on/off carrier
 * option.
 *
 * Each pause @reason owns one bit of adapter->pause_map, protected by
 * pause_map_lock.  Queues are physically stopped only on the 0 -> non-0
 * transition of the map and restarted only on the non-0 -> 0 transition,
 * so overlapping pause reasons do not wake the queues early.  Every call
 * is also recorded in a circular history buffer for debugging.
 *
 * Return: None
 */
void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	uint32_t temp_map;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) ||
		 (!adapter->dev)) {
		hdd_err("adapter is invalid");
		return;
	}

	switch (action) {

	case WLAN_NETIF_CARRIER_ON:
		netif_carrier_on(adapter->dev);
		break;

	case WLAN_NETIF_CARRIER_OFF:
		netif_carrier_off(adapter->dev);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		/* only the first pause reason actually stops the queues */
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		/* snapshot before clearing so pause stats see the old map */
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			/* NOTE(review): wake (vs start) also kicks tx
			 * scheduling in the kernel — confirm intent */
			netif_tx_wake_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		netif_carrier_off(adapter->dev);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_carrier_on(adapter->dev);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	default:
		hdd_err("unsupported action %d", action);
	}

	/* unauthorised-peer pause handling runs after every action */
	spin_lock_bh(&adapter->pause_map_lock);
	if (adapter->pause_map & (1 << WLAN_PEER_UNAUTHORISED))
		wlan_hdd_process_peer_unauthorised_pause(adapter);
	spin_unlock_bh(&adapter->pause_map_lock);

	wlan_hdd_update_queue_oper_stats(adapter, action, reason);

	/* append this transition to the circular operation history */
	adapter->queue_oper_history[adapter->history_index].time =
						qdf_system_ticks();
	adapter->queue_oper_history[adapter->history_index].netif_action =
						action;
	adapter->queue_oper_history[adapter->history_index].netif_reason =
						reason;
	adapter->queue_oper_history[adapter->history_index].pause_map =
						adapter->pause_map;
	if (++adapter->history_index == WLAN_HDD_MAX_HISTORY_ENTRY)
		adapter->history_index = 0;
}
1571
/**
 * hdd_set_mon_rx_cb() - Set Monitor mode Rx callback
 * @dev: Pointer to net_device structure
 *
 * Registers the monitor-mode rx handlers with the datapath (cdp)
 * vdev, registers the self peer with the txrx pdev, then asks SME to
 * create a monitor session on the adapter's current MAC address.
 *
 * Return: 0 for success; non-zero for failure
 */
int hdd_set_mon_rx_cb(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	int ret;
	QDF_STATUS qdf_status;
	struct ol_txrx_desc_type sta_desc = {0};
	struct ol_txrx_ops txrx_ops;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	ret = wlan_hdd_validate_context(hdd_ctx);
	if (0 != ret)
		return ret;

	/* hook the monitor-mode rx callbacks into the datapath vdev */
	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
	txrx_ops.rx.rx = hdd_mon_rx_packet_cbk;
	hdd_monitor_set_rx_monitor_cb(&txrx_ops, hdd_rx_monitor_callback);
	cdp_vdev_register(soc,
		(struct cdp_vdev *)cdp_get_vdev_from_vdev_id(soc,
		(struct cdp_pdev *)pdev, adapter->sessionId),
		adapter, &txrx_ops);
	/* peer is created wma_vdev_attach->wma_create_peer */
	qdf_status = cdp_peer_register(soc,
			(struct cdp_pdev *)pdev, &sta_desc);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		hdd_err("cdp_peer_register() failed to register. Status= %d [0x%08X]",
			qdf_status, qdf_status);
		goto exit;
	}

	qdf_status = sme_create_mon_session(hdd_ctx->hHal,
					adapter->macAddressCurrent.bytes);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		/* logged here; the failure status still propagates to the
		 * caller via the conversion below */
		hdd_err("sme_create_mon_session() failed to register. Status= %d [0x%08X]",
			qdf_status, qdf_status);
	}
exit:
	/* return value reflects the most recent qdf_status */
	ret = qdf_status_to_os_return(qdf_status);
	return ret;
}
Nirav Shahbd36b062016-07-18 11:12:59 +05301619
1620/**
1621 * hdd_send_rps_ind() - send rps indication to daemon
1622 * @adapter: adapter context
1623 *
1624 * If RPS feature enabled by INI, send RPS enable indication to daemon
1625 * Indication contents is the name of interface to find correct sysfs node
1626 * Should send all available interfaces
1627 *
1628 * Return: none
1629 */
Jeff Johnson5b76a3e2017-08-29 14:18:38 -07001630void hdd_send_rps_ind(struct hdd_adapter *adapter)
Nirav Shahbd36b062016-07-18 11:12:59 +05301631{
1632 int i;
1633 uint8_t cpu_map_list_len = 0;
Jeff Johnsona9dc1dc2017-08-28 11:37:48 -07001634 struct hdd_context *hdd_ctxt = NULL;
Nirav Shahbd36b062016-07-18 11:12:59 +05301635 struct wlan_rps_data rps_data;
1636
1637 if (!adapter) {
1638 hdd_err("adapter is NULL");
1639 return;
1640 }
1641
1642 hdd_ctxt = WLAN_HDD_GET_CTX(adapter);
1643 rps_data.num_queues = NUM_TX_QUEUES;
1644
1645 hdd_info("cpu_map_list '%s'", hdd_ctxt->config->cpu_map_list);
1646
1647 /* in case no cpu map list is provided, simply return */
1648 if (!strlen(hdd_ctxt->config->cpu_map_list)) {
1649 hdd_err("no cpu map list found");
1650 goto err;
1651 }
1652
1653 if (QDF_STATUS_SUCCESS !=
1654 hdd_hex_string_to_u16_array(hdd_ctxt->config->cpu_map_list,
1655 rps_data.cpu_map_list,
1656 &cpu_map_list_len,
1657 WLAN_SVC_IFACE_NUM_QUEUES)) {
1658 hdd_err("invalid cpu map list");
1659 goto err;
1660 }
1661
1662 rps_data.num_queues =
1663 (cpu_map_list_len < rps_data.num_queues) ?
1664 cpu_map_list_len : rps_data.num_queues;
1665
1666 for (i = 0; i < rps_data.num_queues; i++) {
1667 hdd_info("cpu_map_list[%d] = 0x%x",
1668 i, rps_data.cpu_map_list[i]);
1669 }
1670
1671 strlcpy(rps_data.ifname, adapter->dev->name,
1672 sizeof(rps_data.ifname));
Kondabattini, Ganesh96ac37b2016-09-02 23:12:15 +05301673 wlan_hdd_send_svc_nlink_msg(hdd_ctxt->radio_index,
1674 WLAN_SVC_RPS_ENABLE_IND,
Nirav Shahbd36b062016-07-18 11:12:59 +05301675 &rps_data, sizeof(rps_data));
1676
1677err:
1678 hdd_err("Wrong RPS configuration. enabling rx_thread");
1679 hdd_ctxt->rps = false;
1680 hdd_ctxt->enableRxThread = true;
1681}
1682
#ifdef MSM_PLATFORM
/**
 * hdd_reset_tcp_delack() - Reset tcp delack value to default
 * @hdd_ctx: Handle to hdd context
 *
 * Clears the rx high-throughput indication counter and notifies the
 * userspace daemon that bus bandwidth is at the LOW level, so the TCP
 * delayed-ack tuning is restored to its default value.
 *
 * Return: None
 */
void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx)
{
	enum pld_bus_width_type bus_level = PLD_BUS_WIDTH_LOW;

	hdd_ctx->rx_high_ind_cnt = 0;
	wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index, WLAN_SVC_WLAN_TP_IND,
				    &bus_level, sizeof(bus_level));
}
#endif /* MSM_PLATFORM */