/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"

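/*
 * iwl_mvm_check_pn - replay protection for the RSS queues: validate the
 * CCMP/GCMP packet number (PN) of a hardware-decrypted unicast data frame
 * against the per-queue, per-TID PN stored for the station, and update the
 * stored PN when the frame is accepted. Returns 0 to accept the frame and
 * -1 to drop it.
 */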
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
				   int queue, struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
	struct iwl_mvm_key_pn *ptk_pn;
	int res;
	u8 tid, keyidx;
	u8 pn[IEEE80211_CCMP_PN_LEN];
	u8 *extiv;

	/* do PN checking */

	/* multicast and non-data only arrives on default queue */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return 0;

	/* do not check PN for open AP */
	if (!(stats->flag & RX_FLAG_DECRYPTED))
		return 0;

	/*
	 * avoid checking for default queue - we don't want to replicate
	 * all the logic that's necessary for checking the PN on fragmented
	 * frames, leave that to mac80211
	 */
	if (queue == 0)
		return 0;

	/* if we are here - this for sure is either CCMP or GCMP */
	if (IS_ERR_OR_NULL(sta)) {
		IWL_ERR(mvm,
			"expected hw-decrypted unicast frame for station\n");
		return -1;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	keyidx = extiv[3] >> 6;

	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
	if (!ptk_pn)
		return -1;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = 0;

	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
	if (tid >= IWL_MAX_TID_COUNT)
		return -1;

	/* load pn */
	pn[0] = extiv[7];
	pn[1] = extiv[6];
	pn[2] = extiv[5];
	pn[3] = extiv[4];
	pn[4] = extiv[1];
	pn[5] = extiv[0];

	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
	if (res < 0)
		return -1;
	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
		return -1;

	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
	stats->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

/* iwl_mvm_create_skb - adds the rxb to a new skb */
static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
			       u16 len, u8 crypt_len,
			       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	unsigned int headlen, fraglen, pad_len = 0;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
		pad_len = 2;

		/*
		 * If the device inserted padding it means that (it thought)
		 * the 802.11 header wasn't a multiple of 4 bytes long. In
		 * this case, reserve two bytes at the start of the SKB to
		 * align the payload properly in case we end up copying it.
		 */
		skb_reserve(skb, pad_len);
	}
	len -= pad_len;

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	headlen = (len <= skb_tailroom(skb)) ? len :
					       hdrlen + crypt_len + 8;

	/* The firmware may align the packet to DWORD.
	 * The padding is inserted after the IV.
	 * After copying the header + IV skip the padding if
	 * present before copying packet data.
	 */
	hdrlen += crypt_len;
	skb_put_data(skb, hdr, hdrlen);
	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);

	fraglen = len - headlen;

	if (fraglen) {
		int offset = (void *)hdr + headlen + pad_len -
			     rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}
}

/* iwl_mvm_pass_packet_to_mac80211 - passes the packet to mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct napi_struct *napi,
					    struct sk_buff *skb, int queue,
					    struct ieee80211_sta *sta)
{
	if (iwl_mvm_check_pn(mvm, skb, queue, sta))
		kfree_skb(skb);
	else
		ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}

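/*
 * iwl_mvm_get_signal_strength - fill in the per-chain and overall signal
 * strength in the rx status from the energy values reported in the MPDU
 * descriptor, plus the chain mask taken from rate_n_flags.
 */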
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
					struct iwl_rx_mpdu_desc *desc,
					struct ieee80211_rx_status *rx_status)
{
	int energy_a, energy_b, max_energy;
	u32 rate_flags = le32_to_cpu(desc->rate_n_flags);

	energy_a = desc->energy_a;
	energy_a = energy_a ? -energy_a : S8_MIN;
	energy_b = desc->energy_b;
	energy_b = energy_b ? -energy_b : S8_MIN;
	max_energy = max(energy_a, energy_b);

	IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
			energy_a, energy_b, max_energy);

	rx_status->signal = max_energy;
	rx_status->chains =
		(rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
	rx_status->chain_signal[0] = energy_a;
	rx_status->chain_signal[1] = energy_b;
	rx_status->chain_signal[2] = S8_MIN;
}

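/*
 * iwl_mvm_rx_crypto - interpret the hardware decryption status of the frame:
 * mark it decrypted (and note stripped MIC/ICV where applicable), report the
 * crypto header length via crypt_len, and return -1 if the frame failed the
 * MIC/ICV check and must be dropped, 0 otherwise.
 */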
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
			     struct ieee80211_rx_status *stats,
			     struct iwl_rx_mpdu_desc *desc, u32 pkt_flags,
			     int queue, u8 *crypt_len)
{
	u16 status = le16_to_cpu(desc->status);

	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/* TODO: handle packets encrypted with unknown alg */

	switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
	case IWL_RX_MPDU_STATUS_SEC_CCM:
	case IWL_RX_MPDU_STATUS_SEC_GCM:
		BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
		/* alg is CCM: check MIC only */
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		if (pkt_flags & FH_RSCSR_RADA_EN)
			stats->flag |= RX_FLAG_MIC_STRIPPED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_TKIP:
		/* Don't drop the frame and decrypt it in SW */
		if (!fw_has_api(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
		    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;

		*crypt_len = IEEE80211_TKIP_IV_LEN;
		/* fall through if TTAK OK */
	case IWL_RX_MPDU_STATUS_SEC_WEP:
		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
		    IWL_RX_MPDU_STATUS_SEC_WEP)
			*crypt_len = IEEE80211_WEP_IV_LEN;

		if (pkt_flags & FH_RSCSR_RADA_EN)
			stats->flag |= RX_FLAG_ICV_STRIPPED;

		return 0;
	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;
	default:
		/* Expected in monitor (not having the keys) */
		if (!mvm->monitor_on)
			IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
	}

	return 0;
}

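/*
 * iwl_mvm_rx_csum - propagate the hardware checksum result: mark the skb
 * CHECKSUM_UNNECESSARY when RX checksum offload is enabled on the vif and
 * the firmware validated the L3/L4 checksums.
 */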
static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
	u16 flags = le16_to_cpu(desc->l3l4_flags);
	u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
			  IWL_RX_L3_PROTO_POS);

	if (mvmvif->features & NETIF_F_RXCSUM &&
	    flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
	    (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
	     l3_prot == IWL_RX_L3_TYPE_IPV6 ||
	     l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/*
 * returns true if a packet is a duplicate and should be dropped.
 * Updates AMSDU PN tracking info
 */
static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
			   struct ieee80211_rx_status *rx_status,
			   struct ieee80211_hdr *hdr,
			   struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_rxq_dup_data *dup_data;
	u8 tid, sub_frame_idx;

	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	dup_data = &mvm_sta->dup_data[queue];

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */
	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1)) {
		rx_status->flag |= RX_FLAG_DUP_VALIDATED;
		return false;
	}

	if (ieee80211_is_data_qos(hdr->frame_control))
		/* frame has qos control */
		tid = *ieee80211_get_qos_ctl(hdr) &
			IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = IWL_MAX_TID_COUNT;

	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
	sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
		return true;

	/* Allow same PN as the first subframe for following sub frames */
	if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
	    sub_frame_idx > dup_data->last_sub_frame[tid] &&
	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;

	dup_data->last_seq[tid] = hdr->seq_ctrl;
	dup_data->last_sub_frame[tid] = sub_frame_idx;

	rx_status->flag |= RX_FLAG_DUP_VALIDATED;

	return false;
}

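/*
 * iwl_mvm_notify_rx_queue - send an internal notification, carrying the given
 * payload, to the RX queues selected in rxq_mask via the
 * TRIGGER_RX_QUEUES_NOTIF_CMD firmware command.
 */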
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
			    const u8 *data, u32 count)
{
	struct iwl_rxq_sync_cmd *cmd;
	u32 data_size = sizeof(*cmd) + count;
	int ret;

	/* should be DWORD aligned */
	if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
		return -EINVAL;

	cmd = kzalloc(data_size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->rxq_mask = cpu_to_le32(rxq_mask);
	cmd->count = cpu_to_le32(count);
	cmd->flags = 0;
	memcpy(cmd->payload, data, count);

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(DATA_PATH_GROUP,
					   TRIGGER_RX_QUEUES_NOTIF_CMD),
				   0, data_size, cmd);

	kfree(cmd);
	return ret;
}

/*
 * Returns true if sn2 - buffer_size < sn1 < sn2.
 * To be used only in order to compare reorder buffer head with NSSN.
 * We fully trust NSSN unless it is behind us due to reorder timeout.
 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
 */
static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
{
	return ieee80211_sn_less(sn1, sn2) &&
	       !ieee80211_sn_less(sn1, sn2 - buffer_size);
}

#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)

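/*
 * iwl_mvm_release_frames - release frames buffered in the per-queue reorder
 * buffer up to (but not including) the given NSSN to mac80211, advance the
 * buffer head, and re-arm the reorder timer if frames remain stored.
 */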
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta,
				   struct napi_struct *napi,
				   struct iwl_mvm_baid_data *baid_data,
				   struct iwl_mvm_reorder_buffer *reorder_buf,
				   u16 nssn)
{
	struct iwl_mvm_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	lockdep_assert_held(&reorder_buf->lock);

	/* ignore nssn smaller than head sn - this can happen due to timeout */
	if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
		goto set_timer;

	while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
		int index = ssn % reorder_buf->buf_size;
		struct sk_buff_head *skb_list = &entries[index].e.frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/*
		 * Empty the list. Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;

set_timer:
	if (reorder_buf->num_stored && !reorder_buf->removed) {
		u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

		while (skb_queue_empty(&entries[index].e.frames))
			index = (index + 1) % reorder_buf->buf_size;
		/* modify timer to match next frame's expiration time */
		mod_timer(&reorder_buf->reorder_timer,
			  entries[index].e.reorder_time + 1 +
			  RX_REORDER_BUF_TIMEOUT_MQ);
	} else {
		del_timer(&reorder_buf->reorder_timer);
	}
}

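/*
 * iwl_mvm_reorder_timer_expired - reorder buffer timer callback: release all
 * frames that have been buffered longer than the reorder timeout, notify
 * mac80211 of the frame timeout, and re-arm the timer for the first frame
 * that has not expired yet.
 */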
void iwl_mvm_reorder_timer_expired(struct timer_list *t)
{
	struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
	struct iwl_mvm_baid_data *baid_data =
		iwl_mvm_baid_data_from_reorder_buf(buf);
	struct iwl_mvm_reorder_buf_entry *entries =
		&baid_data->entries[buf->queue * baid_data->entries_per_queue];
	int i;
	u16 sn = 0, index = 0;
	bool expired = false;
	bool cont = false;

	spin_lock(&buf->lock);

	if (!buf->num_stored || buf->removed) {
		spin_unlock(&buf->lock);
		return;
	}

	for (i = 0; i < buf->buf_size; i++) {
		index = (buf->head_sn + i) % buf->buf_size;

		if (skb_queue_empty(&entries[index].e.frames)) {
			/*
			 * If there is a hole and the next frame didn't expire
			 * we want to break and not advance SN
			 */
			cont = false;
			continue;
		}
		if (!cont &&
		    !time_after(jiffies, entries[index].e.reorder_time +
					 RX_REORDER_BUF_TIMEOUT_MQ))
			break;

		expired = true;
		/* continue until next hole after these expired frames */
		cont = true;
		sn = ieee80211_sn_add(buf->head_sn, i + 1);
	}

	if (expired) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id = baid_data->sta_id;

		rcu_read_lock();
		sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		/* SN is set to the last expired frame + 1 */
		IWL_DEBUG_HT(buf->mvm,
			     "Releasing expired frames for sta %u, sn %d\n",
			     sta_id, sn);
		iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
						     sta, baid_data->tid);
		iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
		rcu_read_unlock();
	} else {
		/*
		 * If no frame expired and there are stored frames, index is now
		 * pointing to the first unexpired frame - modify the timer
		 * according to this frame.
		 */
		mod_timer(&buf->reorder_timer,
			  entries[index].e.reorder_time +
			  1 + RX_REORDER_BUF_TIMEOUT_MQ);
	}
	spin_unlock(&buf->lock);
}

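/*
 * iwl_mvm_del_ba - handle an internal delBA notification on this queue:
 * flush the corresponding reorder buffer to the stack and stop its timer.
 */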
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
			   struct iwl_mvm_delba_data *data)
{
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	u8 baid = data->baid;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out;

	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
			       ieee80211_sn_add(reorder_buf->head_sn,
						reorder_buf->buf_size));
	spin_unlock_bh(&reorder_buf->lock);
	del_timer_sync(&reorder_buf->reorder_timer);

out:
	rcu_read_unlock();
}

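/*
 * iwl_mvm_rx_queue_notif - handle an RX queue sync notification from the
 * firmware: acknowledge sync messages (waking the waiter when the sync
 * counter reaches zero) and dispatch internal notifications such as delBA.
 */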
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			    int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rxq_sync_notification *notif;
	struct iwl_mvm_internal_rxq_notif *internal_notif;

	notif = (void *)pkt->data;
	internal_notif = (void *)notif->payload;

	if (internal_notif->sync) {
		if (mvm->queue_sync_cookie != internal_notif->cookie) {
			WARN_ONCE(1,
				  "Received expired RX queue sync message\n");
			return;
		}
		if (!atomic_dec_return(&mvm->queue_sync_counter))
			wake_up(&mvm->rx_sync_waitq);
	}

	switch (internal_notif->type) {
	case IWL_MVM_RXQ_EMPTY:
		break;
	case IWL_MVM_RXQ_NOTIF_DEL_BA:
		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
		break;
	default:
		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
	}
}

/*
 * Returns true if the MPDU was buffered/dropped, false if it should be passed
 * to upper layer.
 */
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
			    struct napi_struct *napi,
			    int queue,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_baid_data *baid_data;
	struct iwl_mvm_reorder_buffer *buffer;
	struct sk_buff *tail;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	bool last_subframe =
		desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
	u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	u8 sub_frame_idx = desc->amsdu_info &
			   IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
	struct iwl_mvm_reorder_buf_entry *entries;
	int index;
	u16 nssn, sn;
	u8 baid;

	baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
		IWL_RX_MPDU_REORDER_BAID_SHIFT;

	/*
	 * This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	/* no sta yet */
	if (WARN_ONCE(IS_ERR_OR_NULL(sta),
		      "Got valid BAID without a valid station assigned\n"))
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	/* not a data packet or a bar */
	if (!ieee80211_is_back_req(hdr->frame_control) &&
	    (!ieee80211_is_data_qos(hdr->frame_control) ||
	     is_multicast_ether_addr(hdr->addr1)))
		return false;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return false;

	baid_data = rcu_dereference(mvm->baid_map[baid]);
	if (!baid_data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
			     baid, reorder);
		return false;
	}

	if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
		 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
		 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
		 tid))
		return false;

	nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
	sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
		IWL_RX_MPDU_REORDER_SN_SHIFT;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	spin_lock_bh(&buffer->lock);

	if (!buffer->valid) {
		if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
			spin_unlock_bh(&buffer->lock);
			return false;
		}
		buffer->valid = true;
	}

	if (ieee80211_is_back_req(hdr->frame_control)) {
		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
		goto drop;
	}

	/*
	 * If there was a significant jump in the nssn - adjust.
	 * If the SN is smaller than the NSSN it might need to first go into
	 * the reorder buffer, in which case we just release up to it and the
	 * rest of the function will take care of storing it and releasing up to
	 * the nssn
	 */
	if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
				buffer->buf_size) ||
	    !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
		u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;

		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
				       min_sn);
	}

	/* drop any outdated packets */
	if (ieee80211_sn_less(sn, buffer->head_sn))
		goto drop;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
				       buffer->buf_size) &&
		    (!amsdu || last_subframe))
			buffer->head_sn = nssn;
		/* No need to update AMSDU last SN - we are moving the head */
		spin_unlock_bh(&buffer->lock);
		return false;
	}

	/*
	 * release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
		/* No need to update AMSDU last SN - we are moving the head */
		spin_unlock_bh(&buffer->lock);
		return false;
	}

	index = sn % buffer->buf_size;

	/*
	 * Check if we already stored this frame
	 * As AMSDU is either received or not as whole, logic is simple:
	 * If we have frames in that position in the buffer and the last frame
	 * originated from AMSDU had a different SN then it is a retransmission.
	 * If it is the same SN then if the subframe index is incrementing it
	 * is the same AMSDU - otherwise it is a retransmission.
	 */
	tail = skb_peek_tail(&entries[index].e.frames);
	if (tail && !amsdu)
		goto drop;
	else if (tail && (sn != buffer->last_amsdu ||
			  buffer->last_sub_index >= sub_frame_idx))
		goto drop;

	/* put in reorder buffer */
	__skb_queue_tail(&entries[index].e.frames, skb);
	buffer->num_stored++;
	entries[index].e.reorder_time = jiffies;

	if (amsdu) {
		buffer->last_amsdu = sn;
		buffer->last_sub_index = sub_frame_idx;
	}

	/*
	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames arrive.
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 */
	if (!amsdu || last_subframe)
		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);

	spin_unlock_bh(&buffer->lock);
	return true;

drop:
	kfree_skb(skb);
	spin_unlock_bh(&buffer->lock);
	return true;
}

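/*
 * iwl_mvm_agg_rx_received - refresh the last-rx timestamp of the BA session
 * matching this BAID so the aggregation session is not expired by its
 * inactivity timeout while frames are still arriving.
 */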
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
				    u32 reorder_data, u8 baid)
{
	unsigned long now = jiffies;
	unsigned long timeout;
	struct iwl_mvm_baid_data *data;

	rcu_read_lock();

	data = rcu_dereference(mvm->baid_map[baid]);
	if (!data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
			     baid, reorder_data);
		goto out;
	}

	if (!data->timeout)
		goto out;

	timeout = data->timeout;
	/*
	 * Do not update last rx all the time to avoid cache bouncing
	 * between the rx queues.
	 * Update it every timeout. Worst case is the session will
	 * expire after ~ 2 * timeout, which doesn't matter that much.
	 */
	if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
		/* Update is atomic */
		data->last_rx = now;

out:
	rcu_read_unlock();
}

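/*
 * iwl_mvm_rx_mpdu_mq - main RX handler for the multi-queue (RSS) data path:
 * parse the MPDU descriptor, handle decryption status, duplicate detection
 * and checksum offload, fill in the mac80211 rx status (signal, rate, flags),
 * and hand the frame to the reorder buffer or directly to mac80211.
 */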
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
			struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
	u32 len = le16_to_cpu(desc->mpdu_len);
	u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
	u16 phy_info = le16_to_cpu(desc->phy_info);
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb;
	u8 crypt_len = 0;

	if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
		return;

	/* Don't use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
			      le32_to_cpu(pkt->len_n_flags), queue,
			      &crypt_len)) {
		kfree_skb(skb);
		return;
	}

	/*
	 * Keep packets with CRC errors (and with overrun) for monitor mode
	 * (otherwise the firmware discards them) but mark them as bad.
	 */
	if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
	    !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
			     le16_to_cpu(desc->status));
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	}
	/* set the preamble flag if appropriate */
	if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;

	if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
		rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
		/* TSF as indicated by the firmware is at INA time */
		rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
	}
	rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
	rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
					       NL80211_BAND_2GHZ;
	rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
							 rx_status->band);
	iwl_mvm_get_signal_strength(mvm, desc, rx_status);

	/* update aggregation data for monitor sake on default queue */
	if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
		bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;

		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status->ampdu_reference = mvm->ampdu_ref;
		/* toggle is switched whenever new aggregation starts */
		if (toggle_bit != mvm->ampdu_toggle) {
			mvm->ampdu_ref++;
			mvm->ampdu_toggle = toggle_bit;
		}
	}

	rcu_read_lock();

	if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
		u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;

		if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
			sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
			if (IS_ERR(sta))
				sta = NULL;
		}
	} else if (!is_multicast_ether_addr(hdr->addr2)) {
		/*
		 * This is fine since we prevent two stations with the same
		 * address from being added.
		 */
		sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
	}

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct ieee80211_vif *tx_blocked_vif =
			rcu_dereference(mvm->csa_tx_blocked_vif);
		u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
			       IWL_RX_MPDU_REORDER_BAID_MASK) >>
			       IWL_RX_MPDU_REORDER_BAID_SHIFT);

		/*
		 * We have tx blocked stations (with CS bit). If we heard
		 * frames from a blocked station on a new channel we can
		 * TX to it again.
		 */
		if (unlikely(tx_blocked_vif) &&
		    tx_blocked_vif == mvmsta->vif) {
			struct iwl_mvm_vif *mvmvif =
				iwl_mvm_vif_from_mac80211(tx_blocked_vif);

			if (mvmvif->csa_target_freq == rx_status->freq)
				iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
								 false);
		}

		rs_update_last_rssi(mvm, mvmsta, rx_status);

		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
		    ieee80211_is_beacon(hdr->frame_control)) {
			struct iwl_fw_dbg_trigger_tlv *trig;
			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
			bool trig_check;
			s32 rssi;

			trig = iwl_fw_dbg_get_trigger(mvm->fw,
						      FW_DBG_TRIGGER_RSSI);
			rssi_trig = (void *)trig->data;
			rssi = le32_to_cpu(rssi_trig->rssi);

			trig_check =
				iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
							      ieee80211_vif_to_wdev(mvmsta->vif),
							      trig);
			if (trig_check && rx_status->signal < rssi)
				iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
							NULL);
		}

		if (ieee80211_is_data(hdr->frame_control))
			iwl_mvm_rx_csum(sta, skb, desc);

		if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
			kfree_skb(skb);
			goto out;
		}

		/*
		 * Our hardware de-aggregates AMSDUs but copies the mac header
		 * as-is to the de-aggregated MPDUs. We need to turn off the
		 * AMSDU bit in the QoS control ourselves.
		 * In addition, HW reverses addr3 and addr4 - reverse it back.
		 */
		if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
		    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
			int i;
			u8 *qc = ieee80211_get_qos_ctl(hdr);
			u8 mac_addr[ETH_ALEN];

			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			for (i = 0; i < ETH_ALEN; i++)
				mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1];
			ether_addr_copy(hdr->addr3, mac_addr);

			if (ieee80211_has_a4(hdr->frame_control)) {
				for (i = 0; i < ETH_ALEN; i++)
					mac_addr[i] =
						hdr->addr4[ETH_ALEN - i - 1];
				ether_addr_copy(hdr->addr4, mac_addr);
			}
		}
		if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
			u32 reorder_data = le32_to_cpu(desc->reorder_data);

			iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
		}
	}

	/* Set up the HT phy flags */
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rx_status->bw = RATE_INFO_BW_40;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rx_status->bw = RATE_INFO_BW_80;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rx_status->bw = RATE_INFO_BW_160;
		break;
	}

	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
	    rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
	if (rate_n_flags & RATE_MCS_LDPC_MSK)
		rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
			  RATE_MCS_STBC_POS;
		rx_status->encoding = RX_ENC_HT;
		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
			  RATE_MCS_STBC_POS;
		rx_status->nss =
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
			 RATE_VHT_MCS_NSS_POS) + 1;
		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
		rx_status->encoding = RX_ENC_VHT;
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
		if (rate_n_flags & RATE_MCS_BF_MSK)
			rx_status->enc_flags |= RX_ENC_FLAG_BF;
	} else {
		int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							       rx_status->band);

		if (WARN(rate < 0 || rate > 0xFF,
			 "Invalid rate flags 0x%x, band %d,\n",
			 rate_n_flags, rx_status->band)) {
			kfree_skb(skb);
			goto out;
		}
		rx_status->rate_idx = rate;
	}

	/* management stuff on default queue */
	if (!queue) {
		if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
			      ieee80211_is_probe_resp(hdr->frame_control)) &&
			     mvm->sched_scan_pass_all ==
			     SCHED_SCAN_PASS_ALL_ENABLED))
			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;

		if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
			     ieee80211_is_probe_resp(hdr->frame_control)))
			rx_status->boottime_ns = ktime_get_boot_ns();
	}

	iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
out:
	rcu_read_unlock();
}

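/*
 * iwl_mvm_rx_frame_release - handle a frame release notification from the
 * firmware: release frames from the matching reorder buffer up to the NSSN
 * carried in the notification.
 */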
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_frame_release *release = (void *)pkt->data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	struct iwl_mvm_baid_data *ba_data;

	int baid = release->baid;

	IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
		     release->baid, le16_to_cpu(release->nssn));

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out;

	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
			       le16_to_cpu(release->nssn));
	spin_unlock_bh(&reorder_buf->lock);

out:
	rcu_read_unlock();
}