/*
 * Marvell Wireless LAN device driver: station RX data handling
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "11n_aggr.h"
#include "11n_rxreorder.h"

/*
 * This function processes the received packet and forwards it
 * to the kernel/upper layer.
 *
 * It parses the received packet and determines whether it is a
 * debug packet or a normal packet.
 *
 * For non-debug packets, the function chops off the unnecessary leading
 * header bytes, reconstructs the packet as an Ethernet frame or an
 * 802.2/llc/snap frame as required, and sends it to the kernel/upper layer.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_rx_packet(struct mwifiex_private *priv,
			      struct sk_buff *skb)
{
	int ret;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct rxpd *local_rx_pd;
	int hdr_chop;
	struct ethhdr *eth_hdr;
	u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

	local_rx_pd = (struct rxpd *) (skb->data);

	rx_pkt_hdr = (void *)local_rx_pd +
		     le16_to_cpu(local_rx_pd->rx_pkt_offset);

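	/* Sketch of the buffer layout implied by the offsets used below
	 * (a rough illustration, not an exhaustive description):
	 *
	 *   skb->data --> [ struct rxpd | ... | eth803_hdr | rfc1042_hdr | payload ]
	 *                 |<---- rx_pkt_offset ---->|
	 *
	 * rx_pkt_hdr therefore points at the 802.3 header of the frame
	 * passed up by the firmware.
	 */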
	if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
		    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
		/*
		 * Replace the 802.3 header and rfc1042 header (llc/snap)
		 * with an Ethernet II header, keeping the src/dst addresses
		 * and the snap_type (ethertype).
		 * The firmware only passes up SNAP frames, converting all
		 * RX data from 802.11 to 802.2/LLC/SNAP frames.
		 * To create the Ethernet II header, just move the src and
		 * dst addresses to right before the snap_type.
		 */
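		/* The arithmetic below places eth_hdr 2 * ETH_ALEN + 2 bytes
		 * before the end of the LLC/SNAP header, so that after the
		 * dst/src copies the existing snap_type field lines up as
		 * the Ethernet II ethertype (h_proto).
		 */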
		eth_hdr = (struct ethhdr *)
			((u8 *) &rx_pkt_hdr->eth803_hdr
			 + sizeof(rx_pkt_hdr->eth803_hdr) +
			 sizeof(rx_pkt_hdr->rfc1042_hdr)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));

		memcpy(eth_hdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
		       sizeof(eth_hdr->h_source));
		memcpy(eth_hdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
		       sizeof(eth_hdr->h_dest));

		/* Chop off the rxpd plus the part of the 802.2/llc/snap
		 * header that is no longer needed.
		 */
		hdr_chop = (u8 *) eth_hdr - (u8 *) local_rx_pd;
	} else {
		/* Chop off the rxpd */
		hdr_chop = (u8 *) &rx_pkt_hdr->eth803_hdr -
			   (u8 *) local_rx_pd;
	}

	/* Chop off the leading header bytes so that the buffer points to the
	 * start of either the reconstructed Ethernet II frame or the
	 * 802.2/llc/snap frame.
	 */
	skb_pull(skb, hdr_chop);

	priv->rxpd_rate = local_rx_pd->rx_rate;

	priv->rxpd_htinfo = local_rx_pd->ht_info;

	ret = mwifiex_recv_packet(priv, skb);
	if (ret == -1)
		dev_err(priv->adapter->dev, "recv packet failed\n");

	return ret;
}

/*
 * This function processes the received buffer.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it is a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. Non-unicast packets are sent directly to
 * the kernel/upper layers. Unicast packets are handed over to the
 * Rx reordering routine if 11n is enabled.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = 0;
	struct rxpd *local_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ta[ETH_ALEN];
	u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;

	local_rx_pd = (struct rxpd *) (skb->data);
	rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
	rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
	rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
	seq_num = le16_to_cpu(local_rx_pd->seq_num);

	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;

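	/* Sanity check: the payload described by the RxPD (offset + length)
	 * must fit inside the skb; otherwise the frame is dropped.
	 */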
	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
		dev_err(adapter->dev,
			"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
			skb->len, rx_pkt_offset, rx_pkt_length);
		priv->stats.rx_dropped++;

		if (adapter->if_ops.data_complete)
			adapter->if_ops.data_complete(adapter, skb);
		else
			dev_kfree_skb_any(skb);

		return ret;
	}

	if (rx_pkt_type == PKT_TYPE_AMSDU) {
		struct sk_buff_head list;
		struct sk_buff *rx_skb;

		__skb_queue_head_init(&list);

		skb_pull(skb, rx_pkt_offset);
		skb_trim(skb, rx_pkt_length);

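		/* ieee80211_amsdu_to_8023s() (a cfg80211 helper) splits the
		 * A-MSDU into individual 802.3 frames and queues them on
		 * 'list'; each subframe is then passed up separately.
		 */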
		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
					 priv->wdev->iftype, 0, false);

		while (!skb_queue_empty(&list)) {
			rx_skb = __skb_dequeue(&list);
			ret = mwifiex_recv_packet(priv, rx_skb);
			if (ret == -1)
				dev_err(adapter->dev, "Rx of A-MSDU failed");
		}
		return 0;
	} else if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			dev_err(adapter->dev, "Rx of mgmt packet failed");
		dev_kfree_skb_any(skb);
		return ret;
	}

	/*
	 * If the packet is not a unicast packet destined to us, or 11n is
	 * not enabled, send it directly to the OS without passing it
	 * through Rx reordering.
	 */
	if (!IS_11N_ENABLED(priv) ||
	    memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
		mwifiex_process_rx_packet(priv, skb);
		return ret;
	}

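	/* Pick the transmitter address (ta) used to index the Rx reorder
	 * table: for RA-based queuing (e.g. AP/ad-hoc style interfaces) use
	 * the frame's source address, otherwise (station mode) use the MAC
	 * address of the associated AP.
	 */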
	if (mwifiex_queuing_ra_based(priv)) {
		memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
	} else {
		if (rx_pkt_type != PKT_TYPE_BAR)
			priv->rx_seq[local_rx_pd->priority] = seq_num;
		memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
		       ETH_ALEN);
	}

	/* Reorder and send to OS */
	ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
					 ta, (u8) rx_pkt_type, skb);

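	/* If reordering failed, or the frame was a BAR, the skb has not been
	 * delivered upwards; complete or free it here. Failures are also
	 * counted as dropped packets below.
	 */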
	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
		if (adapter->if_ops.data_complete)
			adapter->if_ops.data_complete(adapter, skb);
		else
			dev_kfree_skb_any(skb);
	}

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}