Johannes Berg8ca151b2013-01-24 14:25:36 +01001/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
Emmanuel Grumbach51368bf2013-12-30 13:15:54 +02008 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
Emmanuel Grumbach42032632015-04-15 12:43:46 +03009 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
Emmanuel Grumbach532beba2016-03-07 22:23:52 +020010 * Copyright(c) 2016 Intel Deutschland GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010011 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
Emmanuel Grumbach410dc5a2013-02-18 09:22:28 +020027 * in the file called COPYING.
Johannes Berg8ca151b2013-01-24 14:25:36 +010028 *
29 * Contact Information:
Emmanuel Grumbachcb2f8272015-11-17 15:39:56 +020030 * Intel Linux Wireless <linuxwifi@intel.com>
Johannes Berg8ca151b2013-01-24 14:25:36 +010031 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
Emmanuel Grumbach51368bf2013-12-30 13:15:54 +020035 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
Emmanuel Grumbach42032632015-04-15 12:43:46 +030036 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010037 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66#include <linux/ieee80211.h>
67#include <linux/etherdevice.h>
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +030068#include <linux/tcp.h>
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +030069#include <net/ip.h>
Sara Sharon5e6a98d2016-03-10 17:40:56 +020070#include <net/ipv6.h>
Johannes Berg8ca151b2013-01-24 14:25:36 +010071
72#include "iwl-trans.h"
73#include "iwl-eeprom-parse.h"
74#include "mvm.h"
75#include "sta.h"
Golan Ben-Ami2f89a5d2015-10-27 19:17:14 +020076#include "fw-dbg.h"
Johannes Berg8ca151b2013-01-24 14:25:36 +010077
Emmanuel Grumbach42032632015-04-15 12:43:46 +030078static void
79iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
80 u16 tid, u16 ssn)
81{
82 struct iwl_fw_dbg_trigger_tlv *trig;
83 struct iwl_fw_dbg_trigger_ba *ba_trig;
84
85 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
86 return;
87
88 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
89 ba_trig = (void *)trig->data;
90
91 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
92 return;
93
94 if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
95 return;
96
97 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
98 "BAR sent to %pM, tid %d, ssn %d",
99 addr, tid, ssn);
100}
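/*
 * In short: when a BAR is transmitted and the FW_DBG_TRIGGER_BA debug
 * trigger is configured with this TID's bit set in tx_bar, a firmware
 * debug data collection is started, recording the receiver address,
 * TID and SSN of the BAR.
 */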
101
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200102#define OPT_HDR(type, skb, off) \
103 (type *)(skb_network_header(skb) + (off))
104
Sara Sharonb86dd742016-09-29 15:16:03 +0300105static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
106 struct ieee80211_hdr *hdr,
107 struct ieee80211_tx_info *info)
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200108{
Sara Sharonb86dd742016-09-29 15:16:03 +0300109 u16 offload_assist = 0;
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200110#if IS_ENABLED(CONFIG_INET)
111 u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200112 u8 protocol = 0;
113
114 /*
115 * Do not compute checksum if already computed or if transport will
116 * compute it
117 */
118 if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
Sara Sharonb86dd742016-09-29 15:16:03 +0300119 goto out;
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200120
121 /* We do not expect to be requested to csum stuff we do not support */
122 if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
123 (skb->protocol != htons(ETH_P_IP) &&
124 skb->protocol != htons(ETH_P_IPV6)),
125 "No support for requested checksum\n")) {
126 skb_checksum_help(skb);
Sara Sharonb86dd742016-09-29 15:16:03 +0300127 goto out;
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200128 }
129
130 if (skb->protocol == htons(ETH_P_IP)) {
131 protocol = ip_hdr(skb)->protocol;
132 } else {
133#if IS_ENABLED(CONFIG_IPV6)
134 struct ipv6hdr *ipv6h =
135 (struct ipv6hdr *)skb_network_header(skb);
136 unsigned int off = sizeof(*ipv6h);
137
138 protocol = ipv6h->nexthdr;
139 while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
Sara Sharonecf51422016-06-08 15:15:41 +0300140 struct ipv6_opt_hdr *hp;
141
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200142 /* only supported extension headers */
143 if (protocol != NEXTHDR_ROUTING &&
144 protocol != NEXTHDR_HOP &&
Sara Sharonecf51422016-06-08 15:15:41 +0300145 protocol != NEXTHDR_DEST) {
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200146 skb_checksum_help(skb);
Sara Sharonb86dd742016-09-29 15:16:03 +0300147 goto out;
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200148 }
149
Sara Sharonecf51422016-06-08 15:15:41 +0300150 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
151 protocol = hp->nexthdr;
152 off += ipv6_optlen(hp);
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200153 }
 154 /* if we get here, the protocol should now be TCP or UDP */
155#endif
156 }
157
158 if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
159 WARN_ON_ONCE(1);
160 skb_checksum_help(skb);
Sara Sharonb86dd742016-09-29 15:16:03 +0300161 goto out;
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200162 }
163
164 /* enable L4 csum */
165 offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
166
167 /*
168 * Set offset to IP header (snap).
169 * We don't support tunneling so no need to take care of inner header.
170 * Size is in words.
171 */
172 offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
173
 174 /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
175 if (skb->protocol == htons(ETH_P_IP) &&
176 (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
177 ip_hdr(skb)->check = 0;
178 offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
179 }
180
181 /* reset UDP/TCP header csum */
182 if (protocol == IPPROTO_TCP)
183 tcp_hdr(skb)->check = 0;
184 else
185 udp_hdr(skb)->check = 0;
186
187 /* mac header len should include IV, size is in words */
188 if (info->control.hw_key)
189 mh_len += info->control.hw_key->iv_len;
190 mh_len /= 2;
191 offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
192
Sara Sharonb86dd742016-09-29 15:16:03 +0300193out:
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200194#endif
Sara Sharonb86dd742016-09-29 15:16:03 +0300195 return offload_assist;
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200196}
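/*
 * Illustrative example of the offload_assist value built above, assuming
 * an unencrypted QoS data frame (26-byte MAC header) carrying TCP over
 * IPv4 inside an A-MSDU:
 *
 *	BIT(TX_CMD_OFFLD_L4_EN)			- TCP/UDP checksum done in HW
 *	4 << TX_CMD_OFFLD_IP_HDR		- SNAP header is 8 bytes = 4 words
 *	BIT(TX_CMD_OFFLD_L3_EN)			- IPv4 header checksum (A-MSDU only)
 *	(26 / 2) << TX_CMD_OFFLD_MH_SIZE	- MAC header length in words
 *
 * The 26-byte header length is only an example; with HW crypto the IV
 * length is added before dividing by two.
 */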
197
Johannes Berg8ca151b2013-01-24 14:25:36 +0100198/*
199 * Sets most of the Tx cmd's fields
200 */
Arik Nemtsov6ce73e62014-09-11 13:00:19 +0300201void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
202 struct iwl_tx_cmd *tx_cmd,
203 struct ieee80211_tx_info *info, u8 sta_id)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100204{
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300205 struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100206 struct ieee80211_hdr *hdr = (void *)skb->data;
207 __le16 fc = hdr->frame_control;
208 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
209 u32 len = skb->len + FCS_LEN;
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +0200210 u8 ac;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100211
212 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
213 tx_flags |= TX_CMD_FLG_ACK;
214 else
215 tx_flags &= ~TX_CMD_FLG_ACK;
216
217 if (ieee80211_is_probe_resp(fc))
218 tx_flags |= TX_CMD_FLG_TSF;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100219
Johannes Berg8ca151b2013-01-24 14:25:36 +0100220 if (ieee80211_has_morefrags(fc))
221 tx_flags |= TX_CMD_FLG_MORE_FRAG;
222
223 if (ieee80211_is_data_qos(fc)) {
224 u8 *qc = ieee80211_get_qos_ctl(hdr);
225 tx_cmd->tid_tspec = qc[0] & 0xf;
226 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
Sara Sharond8fe4842016-03-09 10:12:45 +0200227 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
228 tx_cmd->offload_assist |=
229 cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
Eyal Shapira9b3b43d2014-12-31 18:34:56 +0200230 } else if (ieee80211_is_back_req(fc)) {
231 struct ieee80211_bar *bar = (void *)skb->data;
232 u16 control = le16_to_cpu(bar->control);
Emmanuel Grumbach42032632015-04-15 12:43:46 +0300233 u16 ssn = le16_to_cpu(bar->start_seq_num);
Eyal Shapira9b3b43d2014-12-31 18:34:56 +0200234
235 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
236 tx_cmd->tid_tspec = (control &
237 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
238 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
239 WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
Emmanuel Grumbach42032632015-04-15 12:43:46 +0300240 iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
241 ssn);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100242 } else {
243 tx_cmd->tid_tspec = IWL_TID_NON_QOS;
244 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
245 tx_flags |= TX_CMD_FLG_SEQ_CTL;
246 else
247 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
248 }
249
Eyal Shapiraa9dc5062014-12-31 17:58:23 +0200250 /* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
251 if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
252 ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
253 else
254 ac = tid_to_mac80211_ac[0];
255
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +0200256 tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
257 TX_CMD_FLG_BT_PRIO_POS;
258
Johannes Berg8ca151b2013-01-24 14:25:36 +0100259 if (ieee80211_is_mgmt(fc)) {
260 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
Avri Altmanb084a352015-07-12 09:10:05 +0300261 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
262 else if (ieee80211_is_action(fc))
263 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100264 else
Avri Altmanb084a352015-07-12 09:10:05 +0300265 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100266
 267 /* The spec allows Action frames in A-MPDU, but we don't
 268 * support that
269 */
270 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
Johannes Berg63f75352014-01-31 14:56:18 +0100271 } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
Avri Altmanb084a352015-07-12 09:10:05 +0300272 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100273 } else {
Avri Altmanb084a352015-07-12 09:10:05 +0300274 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100275 }
276
Johannes Berg8ca151b2013-01-24 14:25:36 +0100277 if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
278 !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
279 tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
280
Johannes Berg859d9142015-06-01 17:11:11 +0200281 if (fw_has_capa(&mvm->fw->ucode_capa,
282 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
Andrei Otcheretianskif1daa002014-07-01 12:54:25 +0300283 ieee80211_action_contains_tpc(skb))
284 tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
285
Johannes Berg8ca151b2013-01-24 14:25:36 +0100286 tx_cmd->tx_flags = cpu_to_le32(tx_flags);
287 /* Total # bytes to be transmitted */
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300288 tx_cmd->len = cpu_to_le16((u16)skb->len +
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300289 (uintptr_t)skb_info->driver_data[0]);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100290 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
291 tx_cmd->sta_id = sta_id;
Sara Sharond8fe4842016-03-09 10:12:45 +0200292
293 /* padding is inserted later in transport */
294 if (ieee80211_hdrlen(fc) % 4 &&
295 !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
296 tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200297
Sara Sharonb86dd742016-09-29 15:16:03 +0300298 tx_cmd->offload_assist |=
299 cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info));
Johannes Berg8ca151b2013-01-24 14:25:36 +0100300}
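/*
 * Summary of the above: iwl_mvm_set_tx_cmd() fills in the flags, TID/AC,
 * pm_frame_timeout, total frame length (skb->len plus any A-MSDU header
 * bytes stashed in driver_data[0]), sta_id, padding and checksum-offload
 * bits. The rate fields are filled in separately by
 * iwl_mvm_set_tx_cmd_rate().
 */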
301
302/*
303 * Sets the fields in the Tx cmd that are rate related
304 */
Arik Nemtsov6ce73e62014-09-11 13:00:19 +0300305void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
306 struct ieee80211_tx_info *info,
307 struct ieee80211_sta *sta, __le16 fc)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100308{
309 u32 rate_flags;
310 int rate_idx;
311 u8 rate_plcp;
312
313 /* Set retry limit on RTS packets */
314 tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
315
 316 /* Set retry limit on DATA packets and Probe Responses */
317 if (ieee80211_is_probe_resp(fc)) {
318 tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
319 tx_cmd->rts_retry_limit =
320 min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
321 } else if (ieee80211_is_back_req(fc)) {
322 tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
323 } else {
324 tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
325 }
326
327 /*
Eliad Pellere89044d2013-07-16 17:33:26 +0300328 * for data packets, rate info comes from the table inside the fw. This
Emmanuel Grumbach1ffde692014-10-20 08:29:55 +0300329 * table is controlled by LINK_QUALITY commands
Johannes Berg8ca151b2013-01-24 14:25:36 +0100330 */
331
Emmanuel Grumbach1ffde692014-10-20 08:29:55 +0300332 if (ieee80211_is_data(fc) && sta) {
Johannes Berg8ca151b2013-01-24 14:25:36 +0100333 tx_cmd->initial_rate_index = 0;
334 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
335 return;
336 } else if (ieee80211_is_back_req(fc)) {
Emmanuel Grumbach2edc6ec2013-06-02 19:49:15 +0300337 tx_cmd->tx_flags |=
338 cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100339 }
340
341 /* HT rate doesn't make sense for a non data frame */
342 WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
Johannes Bergf85e9d12014-10-08 09:57:29 +0200343 "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
344 info->control.rates[0].flags,
345 info->control.rates[0].idx,
346 le16_to_cpu(fc));
Johannes Berg8ca151b2013-01-24 14:25:36 +0100347
348 rate_idx = info->control.rates[0].idx;
349 /* if the rate isn't a well known legacy rate, take the lowest one */
Johannes Berg7f66ea02016-09-14 10:20:10 +0200350 if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100351 rate_idx = rate_lowest_index(
352 &mvm->nvm_data->bands[info->band], sta);
353
 354 /* For 5 GHz band, remap mac80211 rate indices into driver indices */
Johannes Berg57fbcce2016-04-12 15:56:15 +0200355 if (info->band == NL80211_BAND_5GHZ)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100356 rate_idx += IWL_FIRST_OFDM_RATE;
357
 358 /* For 2.4 GHz band, check that there is no need to remap */
359 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
360
361 /* Get PLCP rate for tx_cmd->rate_n_flags */
362 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
363
364 mvm->mgmt_last_antenna_idx =
Moshe Harela0544272014-12-08 21:13:14 +0200365 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
Johannes Berg8ca151b2013-01-24 14:25:36 +0100366 mvm->mgmt_last_antenna_idx);
Emmanuel Grumbach34c8b242014-05-28 21:53:39 +0300367
Johannes Berg57fbcce2016-04-12 15:56:15 +0200368 if (info->band == NL80211_BAND_2GHZ &&
Emmanuel Grumbach34c8b242014-05-28 21:53:39 +0300369 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
Emmanuel Grumbach923a8c12015-05-31 21:44:22 +0300370 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
Emmanuel Grumbach34c8b242014-05-28 21:53:39 +0300371 else
372 rate_flags =
373 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100374
375 /* Set CCK flag as needed */
376 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
377 rate_flags |= RATE_MCS_CCK_MSK;
378
379 /* Set the rate in the TX cmd */
380 tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
381}
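/*
 * Note that for data frames sent to a known station the rate chosen here
 * is not used: TX_CMD_FLG_STA_RATE tells the firmware to use its
 * LINK_QUALITY (rate scaling) table instead, so rate_n_flags above only
 * matters for management frames, BARs and data sent without a station.
 */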
382
Ayala Beker2a53d162016-04-07 16:21:57 +0300383static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
384 u8 *crypto_hdr)
385{
386 struct ieee80211_key_conf *keyconf = info->control.hw_key;
387 u64 pn;
388
389 pn = atomic64_inc_return(&keyconf->tx_pn);
390 crypto_hdr[0] = pn;
391 crypto_hdr[2] = 0;
392 crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
393 crypto_hdr[1] = pn >> 8;
394 crypto_hdr[4] = pn >> 16;
395 crypto_hdr[5] = pn >> 24;
396 crypto_hdr[6] = pn >> 32;
397 crypto_hdr[7] = pn >> 40;
398}
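/*
 * Illustrative layout of the PN written above, assuming key index 0 and
 * pn == 0x112233445566 (CCMP/GCMP header bytes):
 *
 *	crypto_hdr[0] = 0x66	PN0
 *	crypto_hdr[1] = 0x55	PN1
 *	crypto_hdr[2] = 0x00	reserved
 *	crypto_hdr[3] = 0x20	Ext IV bit | (keyidx << 6)
 *	crypto_hdr[4] = 0x44	PN2
 *	crypto_hdr[5] = 0x33	PN3
 *	crypto_hdr[6] = 0x22	PN4
 *	crypto_hdr[7] = 0x11	PN5
 */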
399
Johannes Berg8ca151b2013-01-24 14:25:36 +0100400/*
401 * Sets the fields in the Tx cmd that are crypto related
402 */
Johannes Bergca8c0f42015-04-20 17:54:54 +0200403static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
404 struct ieee80211_tx_info *info,
405 struct iwl_tx_cmd *tx_cmd,
406 struct sk_buff *skb_frag,
407 int hdrlen)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100408{
409 struct ieee80211_key_conf *keyconf = info->control.hw_key;
Johannes Bergca8c0f42015-04-20 17:54:54 +0200410 u8 *crypto_hdr = skb_frag->data + hdrlen;
411 u64 pn;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100412
413 switch (keyconf->cipher) {
414 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergca8c0f42015-04-20 17:54:54 +0200415 case WLAN_CIPHER_SUITE_CCMP_256:
416 iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
Ayala Beker2a53d162016-04-07 16:21:57 +0300417 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100418 break;
419
420 case WLAN_CIPHER_SUITE_TKIP:
421 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
Eliad Peller1ad4f632016-02-14 13:56:36 +0200422 pn = atomic64_inc_return(&keyconf->tx_pn);
423 ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100424 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
425 break;
426
427 case WLAN_CIPHER_SUITE_WEP104:
428 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
429 /* fall through */
430 case WLAN_CIPHER_SUITE_WEP40:
431 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
432 ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
433 TX_CMD_SEC_WEP_KEY_IDX_MSK);
434
435 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
436 break;
Ayala Beker2a53d162016-04-07 16:21:57 +0300437 case WLAN_CIPHER_SUITE_GCMP:
438 case WLAN_CIPHER_SUITE_GCMP_256:
439 /* TODO: Taking the key from the table might introduce a race
 440 * when PTK rekeying is done, having old packets with a PN
441 * based on the old key but the message encrypted with a new
442 * one.
443 * Need to handle this.
444 */
Emmanuel Grumbach4dc65112016-09-13 22:59:27 +0300445 tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
Ayala Beker2a53d162016-04-07 16:21:57 +0300446 tx_cmd->key[0] = keyconf->hw_key_idx;
447 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
448 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100449 default:
Max Stepanove36e5432013-08-27 19:56:13 +0300450 tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100451 }
452}
453
454/*
 455 * Allocates the Tx cmd and sets the driver data pointers in the skb
456 */
457static struct iwl_device_cmd *
458iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300459 struct ieee80211_tx_info *info, int hdrlen,
460 struct ieee80211_sta *sta, u8 sta_id)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100461{
462 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300463 struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100464 struct iwl_device_cmd *dev_cmd;
465 struct iwl_tx_cmd *tx_cmd;
466
467 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
468
469 if (unlikely(!dev_cmd))
470 return NULL;
471
472 memset(dev_cmd, 0, sizeof(*dev_cmd));
Emmanuel Grumbach3961a612013-10-22 11:27:55 +0300473 dev_cmd->hdr.cmd = TX_CMD;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100474 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
475
476 if (info->control.hw_key)
Johannes Bergca8c0f42015-04-20 17:54:54 +0200477 iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100478
479 iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
480
481 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
482
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300483 memset(&skb_info->status, 0, sizeof(skb_info->status));
484 memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
Johannes Berg8ca151b2013-01-24 14:25:36 +0100485
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300486 skb_info->driver_data[1] = dev_cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100487
488 return dev_cmd;
489}
490
Liad Kaufmande24f632015-08-04 15:19:18 +0300491static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
492 struct ieee80211_tx_info *info, __le16 fc)
493{
Sara Sharon3ee0f0e2016-08-08 11:51:24 +0300494 if (!iwl_mvm_is_dqa_supported(mvm))
495 return info->hw_queue;
Liad Kaufmande24f632015-08-04 15:19:18 +0300496
Sara Sharon3ee0f0e2016-08-08 11:51:24 +0300497 switch (info->control.vif->type) {
498 case NL80211_IFTYPE_AP:
499 /*
500 * handle legacy hostapd as well, where station may be added
501 * only after assoc.
502 */
503 if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
504 return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
505 if (info->hw_queue == info->control.vif->cab_queue)
506 return info->hw_queue;
507
508 WARN_ON_ONCE(1);
509 return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
510 case NL80211_IFTYPE_P2P_DEVICE:
511 if (ieee80211_is_mgmt(fc))
512 return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
513 if (info->hw_queue == info->control.vif->cab_queue)
514 return info->hw_queue;
515
516 WARN_ON_ONCE(1);
517 return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
518 default:
519 WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
520 return -1;
521 }
Liad Kaufmande24f632015-08-04 15:19:18 +0300522}
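/*
 * In DQA mode the mapping above is: probe responses and auth frames on
 * an AP vif go to the AP probe response queue, management frames on a
 * P2P_DEVICE vif go to the P2P device queue, frames already destined to
 * the vif's cab_queue keep their mac80211 queue, and anything else on a
 * non-control vif is unexpected.
 */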
523
Johannes Berg8ca151b2013-01-24 14:25:36 +0100524int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
525{
526 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300527 struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
528 struct ieee80211_tx_info info;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100529 struct iwl_device_cmd *dev_cmd;
530 struct iwl_tx_cmd *tx_cmd;
531 u8 sta_id;
Johannes Bergca8c0f42015-04-20 17:54:54 +0200532 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
Liad Kaufmande24f632015-08-04 15:19:18 +0300533 int queue;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100534
Beni Lev54c5ef22016-08-10 17:03:43 +0300535 /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be sent
 536 * from 2 different types of vifs, P2P & STATION. P2P uses the
 537 * offchannel queue. STATION (HS2.0) uses the auxiliary context of the
 538 * FW, and hence such packets need to be sent on the aux queue
539 */
540 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
541 skb_info->control.vif->type == NL80211_IFTYPE_STATION)
542 IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
543
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300544 memcpy(&info, skb->cb, sizeof(info));
545
546 if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
Johannes Berg8ca151b2013-01-24 14:25:36 +0100547 return -1;
548
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300549 if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
550 (!info.control.vif ||
551 info.hw_queue != info.control.vif->cab_queue)))
Johannes Berg8ca151b2013-01-24 14:25:36 +0100552 return -1;
553
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300554 /* This holds the amsdu headers length */
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300555 skb_info->driver_data[0] = (void *)(uintptr_t)0;
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300556
Liad Kaufmande24f632015-08-04 15:19:18 +0300557 queue = info.hw_queue;
558
Ariej Marjieh7da91b02014-07-07 12:09:40 +0300559 /*
Ilan Peerd0ab08d2015-06-24 09:23:01 +0300560 * If the interface on which the frame is sent is the P2P_DEVICE
Johannes Berg8ca151b2013-01-24 14:25:36 +0100561 * or an AP/GO interface use the broadcast station associated
Ilan Peerd0ab08d2015-06-24 09:23:01 +0300562 * with it; otherwise if the interface is a managed interface
563 * use the AP station associated with it for multicast traffic
 564 * (this is not possible for unicast packets as a TDLS discovery
 565 * response is sent without a station entry); otherwise use the
566 * AUX station.
Liad Kaufmane3118ad2016-06-05 10:49:02 +0300567 * In DQA mode, if vif is of type STATION and frames are not multicast,
568 * they should be sent from the BSS queue. For example, TDLS setup
569 * frames should be sent on this queue, as they go through the AP.
Johannes Berg8ca151b2013-01-24 14:25:36 +0100570 */
Ilan Peerd0ab08d2015-06-24 09:23:01 +0300571 sta_id = mvm->aux_sta.sta_id;
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300572 if (info.control.vif) {
Johannes Berg8ca151b2013-01-24 14:25:36 +0100573 struct iwl_mvm_vif *mvmvif =
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300574 iwl_mvm_vif_from_mac80211(info.control.vif);
Ilan Peerd0ab08d2015-06-24 09:23:01 +0300575
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300576 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
Liad Kaufmande24f632015-08-04 15:19:18 +0300577 info.control.vif->type == NL80211_IFTYPE_AP) {
Ilan Peerd0ab08d2015-06-24 09:23:01 +0300578 sta_id = mvmvif->bcast_sta.sta_id;
Liad Kaufmande24f632015-08-04 15:19:18 +0300579 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
580 hdr->frame_control);
Sara Sharon3ee0f0e2016-08-08 11:51:24 +0300581 if (queue < 0)
582 return -1;
583
Liad Kaufmande24f632015-08-04 15:19:18 +0300584 } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
585 is_multicast_ether_addr(hdr->addr1)) {
Ilan Peerd0ab08d2015-06-24 09:23:01 +0300586 u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
587
588 if (ap_sta_id != IWL_MVM_STATION_COUNT)
589 sta_id = ap_sta_id;
Liad Kaufmane3118ad2016-06-05 10:49:02 +0300590 } else if (iwl_mvm_is_dqa_supported(mvm) &&
591 info.control.vif->type == NL80211_IFTYPE_STATION) {
592 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
Ilan Peerd0ab08d2015-06-24 09:23:01 +0300593 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100594 }
595
Liad Kaufmande24f632015-08-04 15:19:18 +0300596 IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100597
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300598 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100599 if (!dev_cmd)
600 return -1;
601
Johannes Berg8ca151b2013-01-24 14:25:36 +0100602 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
603
604 /* Copy MAC header from skb into command buffer */
Johannes Bergca8c0f42015-04-20 17:54:54 +0200605 memcpy(tx_cmd->hdr, hdr, hdrlen);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100606
Liad Kaufmande24f632015-08-04 15:19:18 +0300607 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
Johannes Berg8ca151b2013-01-24 14:25:36 +0100608 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
609 return -1;
610 }
611
Liad Kaufmanfb896c42016-02-14 15:32:58 +0200612 /*
613 * Increase the pending frames counter, so that later when a reply comes
614 * in and the counter is decreased - we don't start getting negative
615 * values.
616 * Note that we don't need to make sure it isn't agg'd, since we're
617 * TXing non-sta
618 */
619 atomic_inc(&mvm->pending_frames[sta_id]);
620
Johannes Berg8ca151b2013-01-24 14:25:36 +0100621 return 0;
622}
623
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300624#ifdef CONFIG_INET
625static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300626 struct ieee80211_tx_info *info,
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300627 struct ieee80211_sta *sta,
628 struct sk_buff_head *mpdus_skb)
629{
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200630 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300631 struct ieee80211_hdr *hdr = (void *)skb->data;
632 unsigned int mss = skb_shinfo(skb)->gso_size;
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300633 struct sk_buff *tmp, *next;
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300634 char cb[sizeof(skb->cb)];
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200635 unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300636 bool ipv4 = (skb->protocol == htons(ETH_P_IP));
637 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
638 u16 amsdu_add, snap_ip_tcp, pad, i = 0;
Emmanuel Grumbach9e7dce22015-10-26 16:14:06 +0200639 unsigned int dbg_max_amsdu_len;
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200640 netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
Emmanuel Grumbach50b02132015-11-11 11:37:02 +0200641 u8 *qc, tid, txf;
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300642
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300643 snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
644 tcp_hdrlen(skb);
645
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200646 qc = ieee80211_get_qos_ctl(hdr);
647 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
648 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
649 return -EINVAL;
650
Emmanuel Grumbachfa820d62016-04-03 10:15:59 +0300651 dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
652
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300653 if (!sta->max_amsdu_len ||
Emmanuel Grumbach04e3a5d2015-10-28 09:47:41 +0200654 !ieee80211_is_data_qos(hdr->frame_control) ||
Emmanuel Grumbachfa820d62016-04-03 10:15:59 +0300655 (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300656 num_subframes = 1;
657 pad = 0;
658 goto segment;
659 }
660
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200661 /*
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200662 * Do not build AMSDU for IPv6 with extension headers.
663 * ask stack to segment and checkum the generated MPDUs for us.
664 */
665 if (skb->protocol == htons(ETH_P_IPV6) &&
666 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
667 IPPROTO_TCP) {
668 num_subframes = 1;
669 pad = 0;
670 netdev_features &= ~NETIF_F_CSUM_MASK;
671 goto segment;
672 }
673
674 /*
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200675 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
 676 * during a BA session.
677 */
678 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
679 !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300680 num_subframes = 1;
681 pad = 0;
682 goto segment;
683 }
684
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200685 max_amsdu_len = sta->max_amsdu_len;
Emmanuel Grumbach50b02132015-11-11 11:37:02 +0200686
687 /* the Tx FIFO to which this A-MSDU will be routed */
688 txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
689
690 /*
691 * Don't send an AMSDU that will be longer than the TXF.
692 * Add a security margin of 256 for the TX command + headers.
693 * We also want to have the start of the next packet inside the
694 * fifo to be able to send bursts.
695 */
696 max_amsdu_len = min_t(unsigned int, max_amsdu_len,
697 mvm->shared_mem_cfg.txfifo_size[txf] - 256);
698
Emmanuel Grumbachfa820d62016-04-03 10:15:59 +0300699 if (unlikely(dbg_max_amsdu_len))
Emmanuel Grumbach9e7dce22015-10-26 16:14:06 +0200700 max_amsdu_len = min_t(unsigned int, max_amsdu_len,
701 dbg_max_amsdu_len);
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200702
703 /*
704 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
705 * supported. This is a spec requirement (IEEE 802.11-2015
706 * section 8.7.3 NOTE 3).
707 */
708 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
709 !sta->vht_cap.vht_supported)
710 max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
711
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300712 /* Sub frame header + SNAP + IP header + TCP header + MSS */
713 subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
714 pad = (4 - subf_len) & 0x3;
715
716 /*
717 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
718 * N * subf_len + (N - 1) * pad.
719 */
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200720 num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
721 if (num_subframes > 1)
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300722 *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
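/*
 * Worked example with illustrative numbers: mss = 1460 and a 20-byte
 * IPv4 + 20-byte TCP header give snap_ip_tcp = 48, so
 * subf_len = 14 + 48 + 1460 = 1522 and pad = 2. With
 * max_amsdu_len = 7935 this yields (7935 + 2) / (1522 + 2) = 5
 * subframes, before the TB limit below is applied.
 */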
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300723
724 tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
725 tcp_hdrlen(skb) + skb->data_len;
726
727 /*
728 * Make sure we have enough TBs for the A-MSDU:
729 * 2 for each subframe
730 * 1 more for each fragment
731 * 1 more for the potential data in the header
732 */
733 num_subframes =
734 min_t(unsigned int, num_subframes,
735 (mvm->trans->max_skb_frags - 1 -
736 skb_shinfo(skb)->nr_frags) / 2);
737
738 /* This skb fits in one single A-MSDU */
739 if (num_subframes * mss >= tcp_payload_len) {
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300740 struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
741
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300742 /*
743 * Compute the length of all the data added for the A-MSDU.
744 * This will be used to compute the length to write in the TX
745 * command. We have: SNAP + IP + TCP for n -1 subframes and
746 * ETH header for n subframes. Note that the original skb
747 * already had one set of SNAP / IP / TCP headers.
748 */
749 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300750 amsdu_add = num_subframes * sizeof(struct ethhdr) +
751 (num_subframes - 1) * (snap_ip_tcp + pad);
752 /* This holds the amsdu headers length */
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300753 skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300754
755 __skb_queue_tail(mpdus_skb, skb);
756 return 0;
757 }
758
759 /*
760 * Trick the segmentation function to make it
761 * create SKBs that can fit into one A-MSDU.
762 */
763segment:
764 skb_shinfo(skb)->gso_size = num_subframes * mss;
765 memcpy(cb, skb->cb, sizeof(cb));
766
Sara Sharon5e6a98d2016-03-10 17:40:56 +0200767 next = skb_gso_segment(skb, netdev_features);
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300768 skb_shinfo(skb)->gso_size = mss;
769 if (WARN_ON_ONCE(IS_ERR(next)))
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300770 return -EINVAL;
771 else if (next)
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300772 consume_skb(skb);
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300773
774 while (next) {
775 tmp = next;
776 next = tmp->next;
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300777
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300778 memcpy(tmp->cb, cb, sizeof(tmp->cb));
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300779 /*
780 * Compute the length of all the data added for the A-MSDU.
781 * This will be used to compute the length to write in the TX
782 * command. We have: SNAP + IP + TCP for n -1 subframes and
783 * ETH header for n subframes.
784 */
785 tcp_payload_len = skb_tail_pointer(tmp) -
786 skb_transport_header(tmp) -
787 tcp_hdrlen(tmp) + tmp->data_len;
788
789 if (ipv4)
790 ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
791
792 if (tcp_payload_len > mss) {
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300793 struct ieee80211_tx_info *skb_info =
794 IEEE80211_SKB_CB(tmp);
795
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300796 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300797 amsdu_add = num_subframes * sizeof(struct ethhdr) +
798 (num_subframes - 1) * (snap_ip_tcp + pad);
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300799 skb_info->driver_data[0] =
800 (void *)(uintptr_t)amsdu_add;
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300801 skb_shinfo(tmp)->gso_size = mss;
802 } else {
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +0200803 qc = ieee80211_get_qos_ctl((void *)tmp->data);
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300804
805 if (ipv4)
806 ip_send_check(ip_hdr(tmp));
807 *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
808 skb_shinfo(tmp)->gso_size = 0;
809 }
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300810
811 tmp->prev = NULL;
812 tmp->next = NULL;
813
814 __skb_queue_tail(mpdus_skb, tmp);
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300815 i++;
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300816 }
817
818 return 0;
819}
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300820#else /* CONFIG_INET */
821static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300822 struct ieee80211_tx_info *info,
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +0300823 struct ieee80211_sta *sta,
824 struct sk_buff_head *mpdus_skb)
825{
 826 /* Impossible to get TSO without CONFIG_INET */
827 WARN_ON(1);
828
829 return -1;
830}
831#endif
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300832
Liad Kaufman24afba72015-07-28 18:56:08 +0300833static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
834 struct iwl_mvm_sta *mvm_sta, u8 tid,
835 struct sk_buff *skb)
836{
837 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
838 u8 mac_queue = info->hw_queue;
839 struct sk_buff_head *deferred_tx_frames;
840
841 lockdep_assert_held(&mvm_sta->lock);
842
843 mvm_sta->deferred_traffic_tid_map |= BIT(tid);
844 set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
845
846 deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
847
848 skb_queue_tail(deferred_tx_frames, skb);
849
850 /*
851 * The first deferred frame should've stopped the MAC queues, so we
852 * should never get a second deferred frame for the RA/TID.
853 */
854 if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
855 "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
856 skb_queue_len(deferred_tx_frames))) {
857 iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
858 schedule_work(&mvm->add_stream_wk);
859 }
860}
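/*
 * Frames that arrive before their DQA queue exists end up here: the skb
 * is parked on the per-TID deferred_tx_frames list, the mac80211 queue
 * is stopped and add_stream_wk later allocates the TXQ and re-sends the
 * deferred frame.
 */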
861
Liad Kaufman9f9af3d2015-12-23 16:03:46 +0200862/* Check if there are any timed-out TIDs on a given shared TXQ */
863static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
864{
865 unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
866 unsigned long now = jiffies;
867 int tid;
868
869 for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
870 if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
871 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
872 return true;
873 }
874
875 return false;
876}
877
Johannes Berg8ca151b2013-01-24 14:25:36 +0100878/*
 879 * Prepares the Tx cmd for a single MPDU and sends it to the transport
880 */
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300881static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300882 struct ieee80211_tx_info *info,
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +0300883 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100884{
885 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100886 struct iwl_mvm_sta *mvmsta;
887 struct iwl_device_cmd *dev_cmd;
888 struct iwl_tx_cmd *tx_cmd;
889 __le16 fc;
890 u16 seq_number = 0;
891 u8 tid = IWL_MAX_TID_COUNT;
892 u8 txq_id = info->hw_queue;
Johannes Berg7ec54712016-03-16 09:29:48 +0100893 bool is_ampdu = false;
Johannes Bergca8c0f42015-04-20 17:54:54 +0200894 int hdrlen;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100895
Johannes Berg5b577a92013-11-14 18:20:04 +0100896 mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100897 fc = hdr->frame_control;
Johannes Bergca8c0f42015-04-20 17:54:54 +0200898 hdrlen = ieee80211_hdrlen(fc);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100899
900 if (WARN_ON_ONCE(!mvmsta))
901 return -1;
902
Emmanuel Grumbach881acd82013-03-19 16:16:00 +0200903 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
Johannes Berg8ca151b2013-01-24 14:25:36 +0100904 return -1;
905
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +0300906 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
907 sta, mvmsta->sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100908 if (!dev_cmd)
909 goto drop;
910
911 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
912 /* From now on, we cannot access info->control */
913
Johannes Berg3e56ead2013-02-15 22:23:18 +0100914 /*
915 * we handle that entirely ourselves -- for uAPSD the firmware
916 * will always send a notification, and for PS-Poll responses
917 * we'll notify mac80211 when getting frame status
918 */
919 info->flags &= ~IEEE80211_TX_STATUS_EOSP;
920
Johannes Berg8ca151b2013-01-24 14:25:36 +0100921 spin_lock(&mvmsta->lock);
922
923 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
924 u8 *qc = NULL;
925 qc = ieee80211_get_qos_ctl(hdr);
926 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
927 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
928 goto drop_unlock_sta;
929
930 seq_number = mvmsta->tid_data[tid].seq_number;
931 seq_number &= IEEE80211_SCTL_SEQ;
932 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
933 hdr->seq_ctrl |= cpu_to_le16(seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100934 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
Liad Kaufman24afba72015-07-28 18:56:08 +0300935 } else if (iwl_mvm_is_dqa_supported(mvm) &&
936 (ieee80211_is_qos_nullfunc(fc) ||
937 ieee80211_is_nullfunc(fc))) {
938 /*
939 * nullfunc frames should go to the MGMT queue regardless of QOS
940 */
941 tid = IWL_MAX_TID_COUNT;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100942 }
943
Liad Kaufman7585c352016-07-07 11:00:26 +0300944 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman9794c642015-08-19 17:34:28 +0300945 txq_id = mvmsta->tid_data[tid].txq_id;
946
Liad Kaufman7585c352016-07-07 11:00:26 +0300947 if (ieee80211_is_mgmt(fc))
948 tx_cmd->tid_tspec = IWL_TID_NON_QOS;
949 }
950
Johannes Berg8ca151b2013-01-24 14:25:36 +0100951 /* Copy MAC header from skb into command buffer */
Johannes Bergca8c0f42015-04-20 17:54:54 +0200952 memcpy(tx_cmd->hdr, hdr, hdrlen);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100953
954 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
955
Liad Kaufmane3118ad2016-06-05 10:49:02 +0300956 if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
Arik Nemtsova0f6bf22014-09-21 19:10:04 +0300957 /* default to TID 0 for non-QoS packets */
958 u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
959
960 txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
961 }
962
Johannes Berg8ca151b2013-01-24 14:25:36 +0100963 if (is_ampdu) {
964 if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
965 goto drop_unlock_sta;
966 txq_id = mvmsta->tid_data[tid].txq_id;
967 }
968
Liad Kaufman9794c642015-08-19 17:34:28 +0300969 /* Check if TXQ needs to be allocated or re-activated */
970 if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
971 !mvmsta->tid_data[tid].is_tid_active) &&
972 iwl_mvm_is_dqa_supported(mvm)) {
973 /* If TXQ needs to be allocated... */
974 if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
Liad Kaufman24afba72015-07-28 18:56:08 +0300975 iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
976
977 /*
978 * The frame is now deferred, and the worker scheduled
979 * will re-allocate it, so we can free it for now.
980 */
981 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
982 spin_unlock(&mvmsta->lock);
983 return 0;
984 }
985
Liad Kaufman9794c642015-08-19 17:34:28 +0300986 /* If we are here - TXQ exists and needs to be re-activated */
987 spin_lock(&mvm->queue_info_lock);
988 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
989 mvmsta->tid_data[tid].is_tid_active = true;
990 spin_unlock(&mvm->queue_info_lock);
991
992 IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
993 txq_id);
Liad Kaufman24afba72015-07-28 18:56:08 +0300994 }
995
Liad Kaufman9f9af3d2015-12-23 16:03:46 +0200996 if (iwl_mvm_is_dqa_supported(mvm)) {
997 /* Keep track of the time of the last frame for this RA/TID */
998 mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
999
1000 /*
1001 * If we have timed-out TIDs - schedule the worker that will
1002 * reconfig the queues and update them
1003 *
1004 * Note that the mvm->queue_info_lock isn't being taken here in
1005 * order to not serialize the TX flow. This isn't dangerous
1006 * because scheduling mvm->add_stream_wk can't ruin the state,
1007 * and if we DON'T schedule it due to some race condition then
1008 * next TX we get here we will.
1009 */
1010 if (unlikely(mvm->queue_info[txq_id].status ==
1011 IWL_MVM_QUEUE_SHARED &&
1012 iwl_mvm_txq_should_update(mvm, txq_id)))
1013 schedule_work(&mvm->add_stream_wk);
1014 }
Liad Kaufman9794c642015-08-19 17:34:28 +03001015
Johannes Berg8ca151b2013-01-24 14:25:36 +01001016 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
Eliad Pellercf9d1182013-12-31 18:54:06 +02001017 tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001018
Johannes Berg8ca151b2013-01-24 14:25:36 +01001019 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
1020 goto drop_unlock_sta;
1021
Johannes Berg7ec54712016-03-16 09:29:48 +01001022 if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
Eliad Pellercf9d1182013-12-31 18:54:06 +02001023 mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001024
1025 spin_unlock(&mvmsta->lock);
1026
Liad Kaufmancf961e12015-08-13 19:16:08 +03001027 /* Increase pending frames count if this isn't AMPDU */
1028 if (!is_ampdu)
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001029 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001030
1031 return 0;
1032
1033drop_unlock_sta:
1034 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1035 spin_unlock(&mvmsta->lock);
1036drop:
1037 return -1;
1038}
1039
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +03001040int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
1041 struct ieee80211_sta *sta)
1042{
1043 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +03001044 struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
1045 struct ieee80211_tx_info info;
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +03001046 struct sk_buff_head mpdus_skbs;
1047 unsigned int payload_len;
1048 int ret;
1049
1050 if (WARN_ON_ONCE(!mvmsta))
1051 return -1;
1052
1053 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
1054 return -1;
1055
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +03001056 memcpy(&info, skb->cb, sizeof(info));
1057
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +03001058 /* This holds the amsdu headers length */
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +03001059 skb_info->driver_data[0] = (void *)(uintptr_t)0;
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +03001060
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +03001061 if (!skb_is_gso(skb))
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +03001062 return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +03001063
1064 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1065 tcp_hdrlen(skb) + skb->data_len;
1066
1067 if (payload_len <= skb_shinfo(skb)->gso_size)
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +03001068 return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +03001069
1070 __skb_queue_head_init(&mpdus_skbs);
1071
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +03001072 ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +03001073 if (ret)
1074 return ret;
1075
1076 if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
1077 return ret;
1078
1079 while (!skb_queue_empty(&mpdus_skbs)) {
Emmanuel Grumbacha6d5e322015-10-14 16:28:52 +03001080 skb = __skb_dequeue(&mpdus_skbs);
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +03001081
Emmanuel Grumbach5c08b0f2016-05-03 12:08:43 +03001082 ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
Emmanuel Grumbacha3713f82015-10-14 14:16:35 +03001083 if (ret) {
1084 __skb_queue_purge(&mpdus_skbs);
1085 return ret;
1086 }
1087 }
1088
1089 return 0;
1090}
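/*
 * iwl_mvm_tx_skb() is the TX entry point for frames that have a station:
 * non-GSO skbs (or GSO skbs whose TCP payload fits in one MSDU) go
 * straight to iwl_mvm_tx_mpdu(); larger GSO skbs are first split by
 * iwl_mvm_tx_tso() into A-MSDU-sized MPDUs that are then sent one by one.
 */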
1091
Johannes Berg8ca151b2013-01-24 14:25:36 +01001092static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
1093 struct ieee80211_sta *sta, u8 tid)
1094{
Johannes Berg5b577a92013-11-14 18:20:04 +01001095 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001096 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1097 struct ieee80211_vif *vif = mvmsta->vif;
1098
1099 lockdep_assert_held(&mvmsta->lock);
1100
Johannes Berg3e56ead2013-02-15 22:23:18 +01001101 if ((tid_data->state == IWL_AGG_ON ||
1102 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
1103 iwl_mvm_tid_queued(tid_data) == 0) {
1104 /*
1105 * Now that this aggregation queue is empty tell mac80211 so it
1106 * knows we no longer have frames buffered for the station on
1107 * this TID (for the TIM bitmap calculation.)
1108 */
1109 ieee80211_sta_set_buffered(sta, tid, false);
1110 }
1111
Johannes Berg8ca151b2013-01-24 14:25:36 +01001112 if (tid_data->ssn != tid_data->next_reclaimed)
1113 return;
1114
1115 switch (tid_data->state) {
1116 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1117 IWL_DEBUG_TX_QUEUES(mvm,
1118 "Can continue addBA flow ssn = next_recl = %d\n",
1119 tid_data->next_reclaimed);
1120 tid_data->state = IWL_AGG_STARTING;
1121 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1122 break;
1123
1124 case IWL_EMPTYING_HW_QUEUE_DELBA:
1125 IWL_DEBUG_TX_QUEUES(mvm,
1126 "Can continue DELBA flow ssn = next_recl = %d\n",
1127 tid_data->next_reclaimed);
Liad Kaufman15985fb2016-06-26 14:45:12 +03001128 if (!iwl_mvm_is_dqa_supported(mvm)) {
1129 u8 mac80211_ac = tid_to_mac80211_ac[tid];
1130
1131 iwl_mvm_disable_txq(mvm, tid_data->txq_id,
1132 vif->hw_queue[mac80211_ac], tid,
1133 CMD_ASYNC);
1134 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001135 tid_data->state = IWL_AGG_OFF;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001136 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1137 break;
1138
1139 default:
1140 break;
1141 }
1142}
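/*
 * The helper above runs once reclaim has caught up (ssn == next_reclaimed):
 * it clears the buffered-frames indication towards mac80211 and lets a
 * pending addBA or delBA flow continue now that the queue has drained.
 */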
1143
1144#ifdef CONFIG_IWLWIFI_DEBUG
1145const char *iwl_mvm_get_tx_fail_reason(u32 status)
1146{
1147#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1148#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1149
1150 switch (status & TX_STATUS_MSK) {
1151 case TX_STATUS_SUCCESS:
1152 return "SUCCESS";
1153 TX_STATUS_POSTPONE(DELAY);
1154 TX_STATUS_POSTPONE(FEW_BYTES);
1155 TX_STATUS_POSTPONE(BT_PRIO);
1156 TX_STATUS_POSTPONE(QUIET_PERIOD);
1157 TX_STATUS_POSTPONE(CALC_TTAK);
1158 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1159 TX_STATUS_FAIL(SHORT_LIMIT);
1160 TX_STATUS_FAIL(LONG_LIMIT);
1161 TX_STATUS_FAIL(UNDERRUN);
1162 TX_STATUS_FAIL(DRAIN_FLOW);
1163 TX_STATUS_FAIL(RFKILL_FLUSH);
1164 TX_STATUS_FAIL(LIFE_EXPIRE);
1165 TX_STATUS_FAIL(DEST_PS);
1166 TX_STATUS_FAIL(HOST_ABORTED);
1167 TX_STATUS_FAIL(BT_RETRY);
1168 TX_STATUS_FAIL(STA_INVALID);
1169 TX_STATUS_FAIL(FRAG_DROPPED);
1170 TX_STATUS_FAIL(TID_DISABLE);
1171 TX_STATUS_FAIL(FIFO_FLUSHED);
1172 TX_STATUS_FAIL(SMALL_CF_POLL);
1173 TX_STATUS_FAIL(FW_DROP);
1174 TX_STATUS_FAIL(STA_COLOR_MISMATCH);
1175 }
1176
1177 return "UNKNOWN";
1178
1179#undef TX_STATUS_FAIL
1180#undef TX_STATUS_POSTPONE
1181}
1182#endif /* CONFIG_IWLWIFI_DEBUG */
1183
Eyal Shapirad310e402013-08-11 18:43:47 +03001184void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
Johannes Berg57fbcce2016-04-12 15:56:15 +02001185 enum nl80211_band band,
Eyal Shapirad310e402013-08-11 18:43:47 +03001186 struct ieee80211_tx_rate *r)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001187{
Johannes Berg8ca151b2013-01-24 14:25:36 +01001188 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
1189 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1190 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1191 case RATE_MCS_CHAN_WIDTH_20:
1192 break;
1193 case RATE_MCS_CHAN_WIDTH_40:
1194 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1195 break;
1196 case RATE_MCS_CHAN_WIDTH_80:
1197 r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
1198 break;
1199 case RATE_MCS_CHAN_WIDTH_160:
1200 r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
1201 break;
1202 }
1203 if (rate_n_flags & RATE_MCS_SGI_MSK)
1204 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1205 if (rate_n_flags & RATE_MCS_HT_MSK) {
1206 r->flags |= IEEE80211_TX_RC_MCS;
1207 r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
1208 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
1209 ieee80211_rate_set_vht(
1210 r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
1211 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1212 RATE_VHT_MCS_NSS_POS) + 1);
1213 r->flags |= IEEE80211_TX_RC_VHT_MCS;
1214 } else {
1215 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
Eyal Shapirad310e402013-08-11 18:43:47 +03001216 band);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001217 }
1218}
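/*
 * The translation above maps a firmware rate_n_flags word back into
 * mac80211's ieee80211_tx_rate: bandwidth, guard interval and greenfield
 * flags first, then an MCS index for HT, MCS + NSS for VHT, or a legacy
 * rate index otherwise.
 */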
1219
Eyal Shapirad310e402013-08-11 18:43:47 +03001220/**
1221 * translate ucode response to mac80211 tx status control values
1222 */
1223static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
1224 struct ieee80211_tx_info *info)
1225{
1226 struct ieee80211_tx_rate *r = &info->status.rates[0];
1227
1228 info->status.antenna =
1229 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1230 iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
1231}
1232
Golan Ben-Ami25657fe2015-09-02 12:34:23 +03001233static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
1234 u32 status)
1235{
1236 struct iwl_fw_dbg_trigger_tlv *trig;
1237 struct iwl_fw_dbg_trigger_tx_status *status_trig;
1238 int i;
1239
1240 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
1241 return;
1242
1243 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
1244 status_trig = (void *)trig->data;
1245
1246 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
1247 return;
1248
1249 for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
1250 /* don't collect on status 0 */
1251 if (!status_trig->statuses[i].status)
1252 break;
1253
1254 if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
1255 continue;
1256
1257 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
1258 "Tx status %d was received",
1259 status & TX_STATUS_MSK);
1260 break;
1261 }
1262}
1263
Johannes Berg8ca151b2013-01-24 14:25:36 +01001264static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1265 struct iwl_rx_packet *pkt)
1266{
1267 struct ieee80211_sta *sta;
1268 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1269 int txq_id = SEQ_TO_QUEUE(sequence);
1270 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1271 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1272 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1273 u32 status = le16_to_cpu(tx_resp->status.status);
1274 u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
1275 struct iwl_mvm_sta *mvmsta;
1276 struct sk_buff_head skbs;
1277 u8 skb_freed = 0;
1278 u16 next_reclaimed, seq_ctl;
Emmanuel Grumbach532beba2016-03-07 22:23:52 +02001279 bool is_ndp = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03001280 bool txq_agg = false; /* Is this TXQ aggregated */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001281
1282 __skb_queue_head_init(&skbs);
1283
1284 seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
1285
1286	/* we can free frames up to ssn % q.n_bd, not inclusive */
1287 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
1288
1289 while (!skb_queue_empty(&skbs)) {
1290 struct sk_buff *skb = __skb_dequeue(&skbs);
1291 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1292
1293 skb_freed++;
1294
1295 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1296
1297 memset(&info->status, 0, sizeof(info->status));
1298
1299 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1300
1301 /* inform mac80211 about what happened with the frame */
1302 switch (status & TX_STATUS_MSK) {
1303 case TX_STATUS_SUCCESS:
1304 case TX_STATUS_DIRECT_DONE:
1305 info->flags |= IEEE80211_TX_STAT_ACK;
1306 break;
1307 case TX_STATUS_FAIL_DEST_PS:
1308 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1309 break;
1310 default:
1311 break;
1312 }
1313
Golan Ben-Ami25657fe2015-09-02 12:34:23 +03001314 iwl_mvm_tx_status_check_trigger(mvm, status);
1315
Johannes Berg8ca151b2013-01-24 14:25:36 +01001316 info->status.rates[0].count = tx_resp->failure_frame + 1;
Eyal Shapirad310e402013-08-11 18:43:47 +03001317 iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
1318 info);
Eyal Shapira929e6ed2015-01-30 13:40:02 +02001319 info->status.status_driver_data[1] =
1320 (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001321
1322 /* Single frame failure in an AMPDU queue => send BAR */
Eytan Lifshitz19e737c2013-09-09 13:30:15 +02001323 if (txq_id >= mvm->first_agg_queue &&
Eyal Shapira9ce578a2014-12-31 15:22:38 +02001324 !(info->flags & IEEE80211_TX_STAT_ACK) &&
1325 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001326 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001327
Emmanuel Grumbachebea2f32013-06-13 10:07:47 +03001328 /* W/A FW bug: seq_ctl is wrong when the status isn't success */
1329 if (status != TX_STATUS_SUCCESS) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001330 struct ieee80211_hdr *hdr = (void *)skb->data;
1331 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1332 }
1333
Emmanuel Grumbach532beba2016-03-07 22:23:52 +02001334 if (unlikely(!seq_ctl)) {
1335 struct ieee80211_hdr *hdr = (void *)skb->data;
1336
1337 /*
1338			 * If it is an NDP, we can't update next_reclaimed since
1339 * its sequence control is 0. Note that for that same
1340 * reason, NDPs are never sent to A-MPDU'able queues
1341 * so that we can never have more than one freed frame
1342			 * for a single Tx response (see WARN_ON below).
1343 */
1344 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1345 is_ndp = true;
1346 }
1347
Emmanuel Grumbach9b5452f2014-10-07 10:38:53 +03001348 /*
1349 * TODO: this is not accurate if we are freeing more than one
1350 * packet.
1351 */
1352 info->status.tx_time =
1353 le16_to_cpu(tx_resp->wireless_media_time);
Eliad Peller3a84b692014-03-12 15:05:06 +02001354 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
1355 info->status.status_driver_data[0] =
1356 (void *)(uintptr_t)tx_resp->reduced_tpc;
1357
Johannes Bergf14d6b32014-03-21 13:30:03 +01001358 ieee80211_tx_status(mvm->hw, skb);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001359 }
1360
Eytan Lifshitz19e737c2013-09-09 13:30:15 +02001361 if (txq_id >= mvm->first_agg_queue) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001362 /* If this is an aggregation queue, we use the ssn since:
1363 * ssn = wifi seq_num % 256.
1364 * The seq_ctl is the sequence control of the packet to which
1365 * this Tx response relates. But if there is a hole in the
1366		 * bitmap of the BA we received, this Tx response may allow us to
1367 * reclaim the hole and all the subsequent packets that were
1368 * already acked. In that case, seq_ctl != ssn, and the next
1369 * packet to be reclaimed will be ssn and not seq_ctl. In that
1370 * case, several packets will be reclaimed even if
1371 * frame_count = 1.
1372 *
1373 * The ssn is the index (% 256) of the latest packet that has
1374		 * been treated (acked / dropped) + 1.
1375 */
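		/*
		 * Worked example: if this response is for the frame with
		 * SN 10 (seq_ctl), but frames 11 and 12 were already acked
		 * in an earlier BA, the firmware reports ssn = 13 and this
		 * single response reclaims frames 10-12, even though
		 * frame_count = 1.
		 */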
1376 next_reclaimed = ssn;
1377 } else {
1378 /* The next packet to be reclaimed is the one after this one */
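		/* (adding 0x10 advances the sequence number by one, since the
		 * SN occupies bits 4-15 of the sequence control field)
		 */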
Johannes Berg9a886582013-02-15 19:25:00 +01001379 next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001380 }
1381
1382 IWL_DEBUG_TX_REPLY(mvm,
Emmanuel Grumbach8c6e83d2013-03-20 17:12:46 +02001383 "TXQ %d status %s (0x%08x)\n",
1384 txq_id, iwl_mvm_get_tx_fail_reason(status), status);
1385
1386 IWL_DEBUG_TX_REPLY(mvm,
1387 "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
1388 le32_to_cpu(tx_resp->initial_rate),
Johannes Berg8ca151b2013-01-24 14:25:36 +01001389 tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
1390 ssn, next_reclaimed, seq_ctl);
1391
1392 rcu_read_lock();
1393
1394 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001395 /*
1396 * sta can't be NULL otherwise it'd mean that the sta has been freed in
1397 * the firmware while we still have packets for it in the Tx queues.
1398 */
1399 if (WARN_ON_ONCE(!sta))
1400 goto out;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001401
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001402 if (!IS_ERR(sta)) {
Johannes Berg5b577a92013-11-14 18:20:04 +01001403 mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001404
1405 if (tid != IWL_TID_NON_QOS) {
1406 struct iwl_mvm_tid_data *tid_data =
1407 &mvmsta->tid_data[tid];
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02001408 bool send_eosp_ndp = false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001409
Johannes Berg2bfb5092012-12-27 21:43:48 +01001410 spin_lock_bh(&mvmsta->lock);
Oren Givon2c4a2472016-05-29 14:05:50 +03001411 if (iwl_mvm_is_dqa_supported(mvm)) {
1412 enum iwl_mvm_agg_state state;
1413
1414 state = mvmsta->tid_data[tid].state;
1415 txq_agg = (state == IWL_AGG_ON ||
1416 state == IWL_EMPTYING_HW_QUEUE_DELBA);
1417 } else {
1418 txq_agg = txq_id >= mvm->first_agg_queue;
1419 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03001420
Emmanuel Grumbach532beba2016-03-07 22:23:52 +02001421 if (!is_ndp) {
1422 tid_data->next_reclaimed = next_reclaimed;
1423 IWL_DEBUG_TX_REPLY(mvm,
1424 "Next reclaimed packet:%d\n",
1425 next_reclaimed);
1426 } else {
1427 IWL_DEBUG_TX_REPLY(mvm,
1428 "NDP - don't update next_reclaimed\n");
1429 }
1430
Johannes Berg8ca151b2013-01-24 14:25:36 +01001431 iwl_mvm_check_ratid_empty(mvm, sta, tid);
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02001432
1433 if (mvmsta->sleep_tx_count) {
1434 mvmsta->sleep_tx_count--;
1435 if (mvmsta->sleep_tx_count &&
1436 !iwl_mvm_tid_queued(tid_data)) {
1437 /*
1438 * The number of frames in the queue
1439 * dropped to 0 even if we sent less
1440 * frames than we thought we had on the
1441 * Tx queue.
1442 * This means we had holes in the BA
1443 * window that we just filled, ask
1444 * mac80211 to send EOSP since the
1445 * firmware won't know how to do that.
1446 * Send NDP and the firmware will send
1447 * EOSP notification that will trigger
1448 * a call to ieee80211_sta_eosp().
1449 */
1450 send_eosp_ndp = true;
1451 }
1452 }
1453
Johannes Berg2bfb5092012-12-27 21:43:48 +01001454 spin_unlock_bh(&mvmsta->lock);
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02001455 if (send_eosp_ndp) {
1456 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
1457 IEEE80211_FRAME_RELEASE_UAPSD,
1458 1, tid, false, false);
1459 mvmsta->sleep_tx_count = 0;
1460 ieee80211_send_eosp_nullfunc(sta, tid);
1461 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001462 }
Johannes Berg3e56ead2013-02-15 22:23:18 +01001463
1464 if (mvmsta->next_status_eosp) {
1465 mvmsta->next_status_eosp = false;
1466 ieee80211_sta_eosp(sta);
1467 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001468 } else {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001469 mvmsta = NULL;
1470 }
1471
1472 /*
1473 * If the txq is not an AMPDU queue, there is no chance we freed
1474 * several skbs. Check that out...
Johannes Berg8ca151b2013-01-24 14:25:36 +01001475 */
Liad Kaufmancf961e12015-08-13 19:16:08 +03001476 if (txq_agg)
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001477 goto out;
1478
1479 /* We can't free more than one frame at once on a shared queue */
Liad Kaufmancf961e12015-08-13 19:16:08 +03001480 WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001481
Emmanuel Grumbach589a6ba2014-06-05 11:32:41 +03001482	/* If we still have frames for this STA, there's nothing to do here */
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001483 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
1484 goto out;
1485
1486 if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03001487
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001488 /*
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03001489 * If there are no pending frames for this STA and
1490 * the tx to this station is not disabled, notify
1491 * mac80211 that this station can now wake up in its
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001492 * STA table.
1493 * If mvmsta is not NULL, sta is valid.
1494 */
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03001495
1496 spin_lock_bh(&mvmsta->lock);
1497
1498 if (!mvmsta->disable_tx)
1499 ieee80211_sta_block_awake(mvm->hw, sta, false);
1500
1501 spin_unlock_bh(&mvmsta->lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001502 }
1503
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001504 if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
1505 /*
1506 * We are draining and this was the last packet - pre_rcu_remove
1507 * has been called already. We might be after the
1508 * synchronize_net already.
1509 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
1510 */
1511 set_bit(sta_id, mvm->sta_drained);
1512 schedule_work(&mvm->sta_drained_wk);
1513 }
1514
1515out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01001516 rcu_read_unlock();
1517}
1518
1519#ifdef CONFIG_IWLWIFI_DEBUG
1520#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
1521static const char *iwl_get_agg_tx_status(u16 status)
1522{
1523 switch (status & AGG_TX_STATE_STATUS_MSK) {
1524 AGG_TX_STATE_(TRANSMITTED);
1525 AGG_TX_STATE_(UNDERRUN);
1526 AGG_TX_STATE_(BT_PRIO);
1527 AGG_TX_STATE_(FEW_BYTES);
1528 AGG_TX_STATE_(ABORT);
1529 AGG_TX_STATE_(LAST_SENT_TTL);
1530 AGG_TX_STATE_(LAST_SENT_TRY_CNT);
1531 AGG_TX_STATE_(LAST_SENT_BT_KILL);
1532 AGG_TX_STATE_(SCD_QUERY);
1533 AGG_TX_STATE_(TEST_BAD_CRC32);
1534 AGG_TX_STATE_(RESPONSE);
1535 AGG_TX_STATE_(DUMP_TX);
1536 AGG_TX_STATE_(DELAY_TX);
1537 }
1538
1539 return "UNKNOWN";
1540}
1541
1542static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1543 struct iwl_rx_packet *pkt)
1544{
1545 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1546 struct agg_tx_status *frame_status = &tx_resp->status;
1547 int i;
1548
1549 for (i = 0; i < tx_resp->frame_count; i++) {
1550 u16 fstatus = le16_to_cpu(frame_status[i].status);
1551
1552 IWL_DEBUG_TX_REPLY(mvm,
1553 "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
1554 iwl_get_agg_tx_status(fstatus),
1555 fstatus & AGG_TX_STATE_STATUS_MSK,
1556 (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
1557 AGG_TX_STATE_TRY_CNT_POS,
1558 le16_to_cpu(frame_status[i].sequence));
1559 }
1560}
1561#else
1562static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1563 struct iwl_rx_packet *pkt)
1564{}
1565#endif /* CONFIG_IWLWIFI_DEBUG */
1566
1567static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1568 struct iwl_rx_packet *pkt)
1569{
1570 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1571 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1572 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1573 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
Sara Sharon13303c02016-04-10 15:51:54 +03001574 struct iwl_mvm_sta *mvmsta;
Liad Kaufmancf961e12015-08-13 19:16:08 +03001575 int queue = SEQ_TO_QUEUE(sequence);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001576
Liad Kaufmancf961e12015-08-13 19:16:08 +03001577 if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
1578 (!iwl_mvm_is_dqa_supported(mvm) ||
1579 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001580 return;
1581
1582 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
1583 return;
1584
1585 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
1586
1587 rcu_read_lock();
1588
Sara Sharon13303c02016-04-10 15:51:54 +03001589 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001590
Sara Sharon13303c02016-04-10 15:51:54 +03001591 if (!WARN_ON_ONCE(!mvmsta)) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001592 mvmsta->tid_data[tid].rate_n_flags =
1593 le32_to_cpu(tx_resp->initial_rate);
Emmanuel Grumbach9b5452f2014-10-07 10:38:53 +03001594 mvmsta->tid_data[tid].tx_time =
1595 le16_to_cpu(tx_resp->wireless_media_time);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001596 }
1597
1598 rcu_read_unlock();
1599}
1600
Johannes Berg04168412015-06-23 21:22:09 +02001601void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001602{
1603 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1604 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1605
1606 if (tx_resp->frame_count == 1)
1607 iwl_mvm_rx_tx_cmd_single(mvm, pkt);
1608 else
1609 iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001610}
1611
Sara Sharonc46e7722016-07-17 14:24:55 +03001612static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
1613 int txq, int index,
1614 struct ieee80211_tx_info *ba_info, u32 rate)
Eyal Shapiraa7130442014-09-14 15:28:09 +03001615{
Johannes Berg8ca151b2013-01-24 14:25:36 +01001616 struct sk_buff_head reclaimed_skbs;
1617 struct iwl_mvm_tid_data *tid_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001618 struct ieee80211_sta *sta;
1619 struct iwl_mvm_sta *mvmsta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001620 struct sk_buff *skb;
Sara Sharonc46e7722016-07-17 14:24:55 +03001621 int freed;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001622
Eyal Shapira2cee4762015-01-16 11:09:30 +02001623 if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
1624 tid >= IWL_MAX_TID_COUNT,
1625 "sta_id %d tid %d", sta_id, tid))
Johannes Berg04168412015-06-23 21:22:09 +02001626 return;
Eyal Shapira2cee4762015-01-16 11:09:30 +02001627
Johannes Berg8ca151b2013-01-24 14:25:36 +01001628 rcu_read_lock();
1629
1630 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1631
1632 /* Reclaiming frames for a station that has been deleted ? */
1633 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
1634 rcu_read_unlock();
Johannes Berg04168412015-06-23 21:22:09 +02001635 return;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001636 }
1637
Johannes Berg5b577a92013-11-14 18:20:04 +01001638 mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001639 tid_data = &mvmsta->tid_data[tid];
1640
Sara Sharonc46e7722016-07-17 14:24:55 +03001641 if (tid_data->txq_id != txq) {
Johannes Berg1f16ea22015-03-06 09:17:37 +01001642 IWL_ERR(mvm,
Sara Sharonc46e7722016-07-17 14:24:55 +03001643 "invalid BA notification: Q %d, tid %d\n",
1644 tid_data->txq_id, tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001645 rcu_read_unlock();
Johannes Berg04168412015-06-23 21:22:09 +02001646 return;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001647 }
1648
Johannes Berg2bfb5092012-12-27 21:43:48 +01001649 spin_lock_bh(&mvmsta->lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001650
1651 __skb_queue_head_init(&reclaimed_skbs);
1652
1653 /*
1654 * Release all TFDs before the SSN, i.e. all TFDs in front of
1655 * block-ack window (we assume that they've been successfully
1656 * transmitted ... if not, it's too late anyway).
1657 */
Sara Sharonc46e7722016-07-17 14:24:55 +03001658 iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001659
Sara Sharonc46e7722016-07-17 14:24:55 +03001660 tid_data->next_reclaimed = index;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001661
1662 iwl_mvm_check_ratid_empty(mvm, sta, tid);
1663
1664 freed = 0;
Sara Sharonc46e7722016-07-17 14:24:55 +03001665 ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001666
1667 skb_queue_walk(&reclaimed_skbs, skb) {
Johannes Berg143582c2014-02-25 10:37:15 +01001668 struct ieee80211_hdr *hdr = (void *)skb->data;
1669 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001670
1671 if (ieee80211_is_data_qos(hdr->frame_control))
1672 freed++;
1673 else
1674 WARN_ON_ONCE(1);
1675
Johannes Berg8ca151b2013-01-24 14:25:36 +01001676 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1677
Johannes Berg143582c2014-02-25 10:37:15 +01001678 memset(&info->status, 0, sizeof(info->status));
1679		/* Packet was transmitted successfully; failures come as single
1680 * frames because before failing a frame the firmware transmits
1681 * it without aggregation at least once.
1682 */
1683 info->flags |= IEEE80211_TX_STAT_ACK;
1684
Eyal Shapiraa7130442014-09-14 15:28:09 +03001685		/* this is the first skb we deliver in this batch,
1686		 * so put the rate scaling data there */
Sara Sharonc46e7722016-07-17 14:24:55 +03001687 if (freed == 1) {
1688 info->flags |= IEEE80211_TX_STAT_AMPDU;
1689 memcpy(&info->status, &ba_info->status,
1690 sizeof(ba_info->status));
1691 iwl_mvm_hwrate_to_tx_status(rate, info);
1692 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001693 }
1694
Johannes Berg2bfb5092012-12-27 21:43:48 +01001695 spin_unlock_bh(&mvmsta->lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001696
Eyal Shapiraa7130442014-09-14 15:28:09 +03001697 /* We got a BA notif with 0 acked or scd_ssn didn't progress which is
1698	 * possible (i.e. first MPDU in the aggregation wasn't acked).
1699	 * Still, it's important to update RS about sent vs. acked.
1700 */
1701 if (skb_queue_empty(&reclaimed_skbs)) {
Eyal Shapiraa7130442014-09-14 15:28:09 +03001702 struct ieee80211_chanctx_conf *chanctx_conf = NULL;
1703
1704 if (mvmsta->vif)
1705 chanctx_conf =
1706 rcu_dereference(mvmsta->vif->chanctx_conf);
1707
1708 if (WARN_ON_ONCE(!chanctx_conf))
1709 goto out;
1710
Sara Sharonc46e7722016-07-17 14:24:55 +03001711 ba_info->band = chanctx_conf->def.chan->band;
1712 iwl_mvm_hwrate_to_tx_status(rate, ba_info);
Eyal Shapiraa7130442014-09-14 15:28:09 +03001713
1714 IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
Sara Sharonc46e7722016-07-17 14:24:55 +03001715 iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
Eyal Shapiraa7130442014-09-14 15:28:09 +03001716 }
1717
1718out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01001719 rcu_read_unlock();
1720
1721 while (!skb_queue_empty(&reclaimed_skbs)) {
1722 skb = __skb_dequeue(&reclaimed_skbs);
Johannes Bergf14d6b32014-03-21 13:30:03 +01001723 ieee80211_tx_status(mvm->hw, skb);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001724 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001725}
1726
Sara Sharonc46e7722016-07-17 14:24:55 +03001727void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1728{
1729 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1730 int sta_id, tid, txq, index;
1731 struct ieee80211_tx_info ba_info = {};
1732 struct iwl_mvm_ba_notif *ba_notif;
1733 struct iwl_mvm_tid_data *tid_data;
1734 struct iwl_mvm_sta *mvmsta;
1735
1736 if (iwl_mvm_has_new_tx_api(mvm)) {
1737 struct iwl_mvm_compressed_ba_notif *ba_res =
1738 (void *)pkt->data;
1739
1740 sta_id = ba_res->sta_id;
1741 ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
1742 ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
1743 ba_info.status.tx_time =
1744 (u16)le32_to_cpu(ba_res->wireless_time);
1745 ba_info.status.status_driver_data[0] =
1746 (void *)(uintptr_t)ba_res->reduced_txp;
1747
1748 /*
1749 * TODO:
1750 * When supporting multi TID aggregations - we need to move
1751 * next_reclaimed to be per TXQ and not per TID or handle it
1752 * in a different way.
1753 * This will go together with SN and AddBA offload and cannot
1754 * be handled properly for now.
1755 */
1756 WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
1757 iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
1758 (int)ba_res->tfd[0].q_num,
1759 le16_to_cpu(ba_res->tfd[0].tfd_index),
1760 &ba_info, le32_to_cpu(ba_res->tx_rate));
1761
1762 IWL_DEBUG_TX_REPLY(mvm,
1763 "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
1764 sta_id, le32_to_cpu(ba_res->flags),
1765 le16_to_cpu(ba_res->txed),
1766 le16_to_cpu(ba_res->done));
1767 return;
1768 }
1769
1770 ba_notif = (void *)pkt->data;
1771 sta_id = ba_notif->sta_id;
1772 tid = ba_notif->tid;
1773 /* "flow" corresponds to Tx queue */
1774 txq = le16_to_cpu(ba_notif->scd_flow);
1775 /* "ssn" is start of block-ack Tx window, corresponds to index
1776 * (in Tx queue's circular buffer) of first TFD/frame in window */
1777 index = le16_to_cpu(ba_notif->scd_ssn);
1778
1779 rcu_read_lock();
1780 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1781 if (WARN_ON_ONCE(!mvmsta)) {
1782 rcu_read_unlock();
1783 return;
1784 }
1785
1786 tid_data = &mvmsta->tid_data[tid];
1787
1788 ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
1789 ba_info.status.ampdu_len = ba_notif->txed;
1790 ba_info.status.tx_time = tid_data->tx_time;
1791 ba_info.status.status_driver_data[0] =
1792 (void *)(uintptr_t)ba_notif->reduced_txp;
1793
1794 rcu_read_unlock();
1795
1796 iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
1797 tid_data->rate_n_flags);
1798
1799 IWL_DEBUG_TX_REPLY(mvm,
1800 "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
1801 (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
1802
1803 IWL_DEBUG_TX_REPLY(mvm,
1804 "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
1805 ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
1806 le64_to_cpu(ba_notif->bitmap), txq, index,
1807 ba_notif->txed, ba_notif->txed_2_done);
1808
1809 IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
1810 ba_notif->reduced_txp);
1811}
1812
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001813/*
1814 * Note that there are transports that buffer frames before they reach
1815 * the firmware. This means that after flush_tx_path is called, the
1816 * queue might not be empty. The race-free way to handle this is to:
1817 * 1) set the station as draining
1818 * 2) flush the Tx path
1819 * 3) wait for the transport queues to be empty
1820 */
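/*
 * An illustrative caller-side sketch of that sequence (a sketch only;
 * iwl_mvm_drain_sta() and iwl_trans_wait_tx_queue_empty() live elsewhere in
 * the driver, and the exact arguments may differ):
 *
 *	iwl_mvm_drain_sta(mvm, mvmsta, true);
 *	iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, 0);
 *	iwl_trans_wait_tx_queue_empty(mvm->trans, mvmsta->tfd_queue_msk);
 *	iwl_mvm_drain_sta(mvm, mvmsta, false);
 */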
Luca Coelho5888a402015-10-06 09:54:57 +03001821int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001822{
1823 int ret;
1824 struct iwl_tx_path_flush_cmd flush_cmd = {
1825 .queues_ctl = cpu_to_le32(tfd_msk),
1826 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
1827 };
1828
Johannes Berg8ca151b2013-01-24 14:25:36 +01001829 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
1830 sizeof(flush_cmd), &flush_cmd);
1831 if (ret)
1832 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
1833 return ret;
1834}