/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"
#include "fw-dbg.h"
static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "BAR sent to %pM, tid %d, ssn %d",
				    addr, tid, ssn);
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

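/*
 * Note: the helper below builds the offload_assist bitfield for checksum
 * offload.  The bits it sets (TX_CMD_OFFLD_L3_EN/L4_EN, the IP header offset
 * and the MAC header size, both in 16-bit words) tell the device which
 * checksums to insert and where the headers start; when offload isn't
 * possible it falls back to skb_checksum_help().
 */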
static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info)
{
	u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/* mac header len should include IV, size is in words */
	if (info->control.hw_key)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

out:
#endif
	return offload_assist;
}

/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			tx_cmd->offload_assist |=
				cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;

	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
		tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));

	tx_cmd->offload_assist |=
		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info));
}

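/*
 * Note: the helper below picks the rate for frames whose rate is chosen by
 * the driver (management and non-station frames): it maps the mac80211
 * legacy rate index to the firmware PLCP value and adds the antenna and CCK
 * flags expected in rate_n_flags.
 */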
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
			       struct ieee80211_tx_info *info,
			       struct ieee80211_sta *sta)
{
	int rate_idx;
	u8 rate_plcp;
	u32 rate_flags;

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx);

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For 2.4 GHZ band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
	else
		rate_flags =
			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	return (u32)rate_plcp | rate_flags;
}

/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			     struct ieee80211_tx_info *info,
			     struct ieee80211_sta *sta, __le16 fc)
{
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * for data packets, rate info comes from the table inside the fw. This
	 * table is controlled by LINK_QUALITY commands
	 */

	if (ieee80211_is_data(fc) && sta) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
		return;
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
				     mvm->mgmt_last_antenna_idx);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
}

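/*
 * Note: the helper below writes the packet number into the CCMP/GCMP IV in
 * the frame header: bytes 0-1 carry PN0-PN1, byte 2 is reserved, byte 3
 * carries the key index and the Ext IV bit (0x20), and bytes 4-7 carry
 * PN2-PN5, matching the assignments done below.
 */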
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
					 u8 *crypto_hdr)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u64 pn;

	pn = atomic64_inc_return(&keyconf->tx_pn);
	crypto_hdr[0] = pn;
	crypto_hdr[2] = 0;
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
	crypto_hdr[1] = pn >> 8;
	crypto_hdr[4] = pn >> 16;
	crypto_hdr[5] = pn >> 24;
	crypto_hdr[6] = pn >> 32;
	crypto_hdr[7] = pn >> 40;
}

/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			 TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		/* TODO: Taking the key from the table might introduce a race
		 * when PTK rekeying is done, having old packets with a PN
		 * based on the old key but the message encrypted with a new
		 * one.
		 * Need to handle this.
		 */
		tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}

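/*
 * Note: two TX command layouts are handled below - the legacy
 * struct iwl_tx_cmd and struct iwl_tx_cmd_gen2, which is used when
 * iwl_mvm_has_new_tx_api() is true; for the gen2 layout only a few fields
 * (offload_assist, length, MAC header, flags and rate) are filled in by the
 * driver.
 */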
/*
 * Allocates the Tx cmd and sets the driver data pointers in the skb
 */
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	/* Make sure we zero enough of dev_cmd */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));

	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
	dev_cmd->hdr.cmd = TX_CMD;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
		u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
				offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
		}

		/* padding is inserted later in transport */
		if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
			offload_assist |= BIT(TX_CMD_OFFLD_PAD);

		cmd->offload_assist |= cpu_to_le16(offload_assist);

		/* Total # bytes to be transmitted */
		cmd->len = cpu_to_le16((u16)skb->len);

		/* Copy MAC header from skb into command buffer */
		memcpy(cmd->hdr, hdr, hdrlen);

		if (!info->control.hw_key)
			cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);

		/* For data packets rate info comes from the fw */
		if (ieee80211_is_data(hdr->frame_control) && sta)
			goto out;

		cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
		cmd->rate_n_flags =
			cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));

		goto out;
	}

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

out:
	return dev_cmd;
}

static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
				       struct iwl_device_cmd *cmd)
{
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

	memset(&skb_info->status, 0, sizeof(skb_info->status));
	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

	skb_info->driver_data[1] = cmd;
}

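/*
 * Note: in non-DQA mode the mac80211-selected queue is used as-is; in DQA
 * mode control frames from AP/IBSS interfaces go to the per-mvm probe queue
 * and P2P Device management frames go to the P2P device queue, as selected
 * below.
 */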
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info, __le16 fc)
{
	if (!iwl_mvm_is_dqa_supported(mvm))
		return info->hw_queue;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		/*
		 * Handle legacy hostapd as well, where station may be added
		 * only after assoc. Take care of the case where we send a
		 * deauth to a station that we don't have.
		 */
		if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
		    ieee80211_is_deauth(fc))
			return mvm->probe_queue;
		if (info->hw_queue == info->control.vif->cab_queue)
			return info->hw_queue;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "fc=0x%02x", le16_to_cpu(fc));
		return mvm->probe_queue;
	case NL80211_IFTYPE_P2P_DEVICE:
		if (ieee80211_is_mgmt(fc))
			return mvm->p2p_dev_queue;
		if (info->hw_queue == info->control.vif->cab_queue)
			return info->hw_queue;

		WARN_ON_ONCE(1);
		return mvm->p2p_dev_queue;
	default:
		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
		return -1;
	}
}

int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_info info;
	struct iwl_device_cmd *dev_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	int queue;

	/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
	 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
	 * and hence needs to be sent on the aux queue
	 */
	if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	    skb_info->control.vif->type == NL80211_IFTYPE_STATION)
		skb_info->hw_queue = mvm->aux_queue;

	memcpy(&info, skb->cb, sizeof(info));

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
			 (!info.control.vif ||
			  info.hw_queue != info.control.vif->cab_queue)))
		return -1;

	queue = info.hw_queue;

	/*
	 * If the interface on which the frame is sent is the P2P_DEVICE
	 * or an AP/GO interface use the broadcast station associated
	 * with it; otherwise if the interface is a managed interface
	 * use the AP station associated with it for multicast traffic
	 * (this is not possible for unicast packets as TDLS discovery
	 * responses are sent without a station entry); otherwise use the
	 * AUX station.
	 * In DQA mode, if vif is of type STATION and frames are not multicast
	 * or offchannel, they should be sent from the BSS queue.
	 * For example, TDLS setup frames should be sent on this queue,
	 * as they go through the AP.
	 */
	sta_id = mvm->aux_sta.sta_id;
	if (info.control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);

		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info.control.vif->type == NL80211_IFTYPE_AP ||
		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
			sta_id = mvmvif->bcast_sta.sta_id;
			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
							   hdr->frame_control);
			if (queue < 0)
				return -1;

			if (queue == info.control.vif->cab_queue)
				queue = mvmvif->cab_queue;
		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
			   is_multicast_ether_addr(hdr->addr1)) {
			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);

			if (ap_sta_id != IWL_MVM_INVALID_STA)
				sta_id = ap_sta_id;
		} else if (iwl_mvm_is_dqa_supported(mvm) &&
			   info.control.vif->type == NL80211_IFTYPE_STATION &&
			   queue != mvm->aux_queue) {
			queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
		} else if (iwl_mvm_is_dqa_supported(mvm) &&
			   info.control.vif->type == NL80211_IFTYPE_MONITOR) {
			queue = mvm->aux_queue;
		}
	}

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	/*
	 * Increase the pending frames counter, so that later when a reply comes
	 * in and the counter is decreased - we don't start getting negative
	 * values.
	 * Note that we don't need to make sure it isn't agg'd, since we're
	 * TXing non-sta
	 * For DQA mode - we shouldn't increase it though
	 */
	if (!iwl_mvm_is_dqa_supported(mvm))
		atomic_inc(&mvm->pending_frames[sta_id]);

	return 0;
}

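/*
 * Note on the A-MSDU sizing done below (illustrative numbers only): with an
 * IPv4/TCP flow and no options, snap_ip_tcp = 8 + 20 + 20 = 48, so
 * subf_len = 14 + 48 + mss.  For mss = 1460 that is 1522 bytes per subframe
 * with 2 bytes of inter-subframe padding; a max_amsdu_len of 7935 bytes
 * would then allow (7935 + 2) / (1522 + 2) = 5 subframes per A-MSDU before
 * the TB-count clamp is applied.
 */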
#ifdef CONFIG_INET
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	struct sk_buff *tmp, *next;
	char cb[sizeof(skb->cb)];
	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
	u16 snap_ip_tcp, pad, i = 0;
	unsigned int dbg_max_amsdu_len;
	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
	u8 *qc, tid, txf;

	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);

	if (!sta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
	    (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	/*
	 * Do not build AMSDU for IPv6 with extension headers.
	 * Ask the stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		num_subframes = 1;
		pad = 0;
		netdev_features &= ~NETIF_F_CSUM_MASK;
		goto segment;
	}

	/*
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during a BA session.
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	max_amsdu_len = sta->max_amsdu_len;

	/* the Tx FIFO to which this A-MSDU will be routed */
	txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	max_amsdu_len = min_t(unsigned int, max_amsdu_len,
			      mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);

	if (unlikely(dbg_max_amsdu_len))
		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
				      dbg_max_amsdu_len);

	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
	 * supported. This is a spec requirement (IEEE 802.11-2015
	 * section 8.7.3 NOTE 3).
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !sta->vht_cap.vht_supported)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/*
	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
	if (num_subframes > 1)
		*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/*
	 * Make sure we have enough TBs for the A-MSDU:
	 *	2 for each subframe
	 *	1 more for each fragment
	 *	1 more for the potential data in the header
	 */
	num_subframes =
		min_t(unsigned int, num_subframes,
		      (mvm->trans->max_skb_frags - 1 -
		       skb_shinfo(skb)->nr_frags) / 2);

	/* This skb fits in one single A-MSDU */
	if (num_subframes * mss >= tcp_payload_len) {
		__skb_queue_tail(mpdus_skb, skb);
		return 0;
	}

	/*
	 * Trick the segmentation function to make it
	 * create SKBs that can fit into one A-MSDU.
	 */
segment:
	skb_shinfo(skb)->gso_size = num_subframes * mss;
	memcpy(cb, skb->cb, sizeof(cb));

	next = skb_gso_segment(skb, netdev_features);
	skb_shinfo(skb)->gso_size = mss;
	if (WARN_ON_ONCE(IS_ERR(next)))
		return -EINVAL;
	else if (next)
		consume_skb(skb);

	while (next) {
		tmp = next;
		next = tmp->next;

		memcpy(tmp->cb, cb, sizeof(tmp->cb));
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n -1 subframes and
		 * ETH header for n subframes.
		 */
		tcp_payload_len = skb_tail_pointer(tmp) -
			skb_transport_header(tmp) -
			tcp_hdrlen(tmp) + tmp->data_len;

		if (ipv4)
			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

		if (tcp_payload_len > mss) {
			skb_shinfo(tmp)->gso_size = mss;
		} else {
			qc = ieee80211_get_qos_ctl((void *)tmp->data);

			if (ipv4)
				ip_send_check(ip_hdr(tmp));
			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
			skb_shinfo(tmp)->gso_size = 0;
		}

		tmp->prev = NULL;
		tmp->next = NULL;

		__skb_queue_tail(mpdus_skb, tmp);
		i++;
	}

	return 0;
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif

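/*
 * Deferred TX path: when a frame arrives for a RA/TID that has no hardware
 * queue allocated yet, the frame is parked on the tid_data deferred list,
 * the MAC queue is stopped and add_stream_wk is scheduled to allocate the
 * queue and release the frame, as done by the helper below.
 */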
static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta, u8 tid,
				  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 mac_queue = info->hw_queue;
	struct sk_buff_head *deferred_tx_frames;

	lockdep_assert_held(&mvm_sta->lock);

	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);

	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;

	skb_queue_tail(deferred_tx_frames, skb);

	/*
	 * The first deferred frame should've stopped the MAC queues, so we
	 * should never get a second deferred frame for the RA/TID.
	 */
	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
		  skb_queue_len(deferred_tx_frames))) {
		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
		schedule_work(&mvm->add_stream_wk);
	}
}

/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
	unsigned long now = jiffies;
	int tid;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
			return true;
	}

	return false;
}

/*
 * Prepares the Tx cmd for a single MPDU and hands it to the transport,
 * taking care of TID/queue selection and sequence numbering.
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_cmd *dev_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u16 txq_id = info->hw_queue;
	bool is_ampdu = false;
	int hdrlen;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
					sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	/*
	 * we handle that entirely ourselves -- for uAPSD the firmware
	 * will always send a notification, and for PS-Poll responses
	 * we'll notify mac80211 when getting frame status
	 */
	info->flags &= ~IEEE80211_TX_STATUS_EOSP;

	spin_lock(&mvmsta->lock);

	/* nullfunc frames should go to the MGMT queue regardless of QOS,
	 * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
	 * assignment of MGMT TID
	 */
	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;

		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
		if (WARN_ON_ONCE(is_ampdu &&
				 mvmsta->tid_data[tid].state != IWL_AGG_ON))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
			hdr->seq_ctrl |= cpu_to_le16(seq_number);
			/* update the tx_cmd hdr as it was already copied */
			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
		}
	}

	if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
		txq_id = mvmsta->tid_data[tid].txq_id;

	if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
		/* default to TID 0 for non-QoS packets */
		u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;

		txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
	}

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	/* Check if TXQ needs to be allocated or re-activated */
	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
		     !mvmsta->tid_data[tid].is_tid_active) &&
	    iwl_mvm_is_dqa_supported(mvm)) {
		/* If TXQ needs to be allocated... */
		if (txq_id == IWL_MVM_INVALID_QUEUE) {
			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);

			/*
			 * The frame is now deferred, and the worker scheduled
			 * will re-allocate it, so we can free it for now.
			 */
			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
			spin_unlock(&mvmsta->lock);
			return 0;
		}

		/* queue should always be active in new TX path */
		WARN_ON(iwl_mvm_has_new_tx_api(mvm));

		/* If we are here - TXQ exists and needs to be re-activated */
		spin_lock(&mvm->queue_info_lock);
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		mvmsta->tid_data[tid].is_tid_active = true;
		spin_unlock(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
				    txq_id);
	}

	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
		/* Keep track of the time of the last frame for this RA/TID */
		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;

		/*
		 * If we have timed-out TIDs - schedule the worker that will
		 * reconfig the queues and update them
		 *
		 * Note that the mvm->queue_info_lock isn't being taken here in
		 * order to not serialize the TX flow. This isn't dangerous
		 * because scheduling mvm->add_stream_wk can't ruin the state,
		 * and if we DON'T schedule it due to some race condition then
		 * next TX we get here we will.
		 */
		if (unlikely(mvm->queue_info[txq_id].status ==
			     IWL_MVM_QUEUE_SHARED &&
			     iwl_mvm_txq_should_update(mvm, txq_id)))
			schedule_work(&mvm->add_stream_wk);
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

	spin_unlock(&mvmsta->lock);

	/* Increase pending frames count if this isn't AMPDU or DQA queue */
	if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	return -1;
}

int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_tx_info info;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (!skb_is_gso(skb))
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
	if (ret)
		return ret;

	if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
		return ret;

	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
		if (ret) {
			__skb_queue_purge(&mpdus_skbs);
			return ret;
		}
	}

	return 0;
}

static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;
	u16 normalized_ssn;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
	     iwl_mvm_is_dqa_supported(mvm)) &&
	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
		/*
		 * Now that this aggregation or DQA queue is empty tell
		 * mac80211 so it knows we no longer have frames buffered for
		 * the station on this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

	/*
	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		if (!iwl_mvm_is_dqa_supported(mvm)) {
			u8 mac80211_ac = tid_to_mac80211_ac[tid];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id,
					    vif->hw_queue[mac80211_ac], tid,
					    CMD_ASYNC);
		}
		tid_data->state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(SMALL_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

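/*
 * Note: the helper below translates a firmware rate_n_flags value
 * (bandwidth, short GI, HT/VHT MCS or legacy index) into the mac80211
 * ieee80211_tx_rate representation used when reporting TX status.
 */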
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum nl80211_band band,
			       struct ieee80211_tx_rate *r)
{
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		r->flags |= IEEE80211_TX_RC_MCS;
		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		ieee80211_rate_set_vht(
			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
			 RATE_VHT_MCS_NSS_POS) + 1);
		r->flags |= IEEE80211_TX_RC_VHT_MCS;
	} else {
		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							     band);
	}
}

Eyal Shapirad310e402013-08-11 18:43:47 +03001267/**
 1268 * iwl_mvm_hwrate_to_tx_status - translate ucode response to mac80211 tx status control values
1269 */
1270static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
1271 struct ieee80211_tx_info *info)
1272{
1273 struct ieee80211_tx_rate *r = &info->status.rates[0];
1274
1275 info->status.antenna =
1276 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1277 iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
1278}
1279
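/*
 * If a FW_DBG_TRIGGER_TX_STATUS trigger is configured and one of its
 * statuses matches this frame's Tx status, collect firmware debug data.
 */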
Golan Ben-Ami25657fe2015-09-02 12:34:23 +03001280static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
1281 u32 status)
1282{
1283 struct iwl_fw_dbg_trigger_tlv *trig;
1284 struct iwl_fw_dbg_trigger_tx_status *status_trig;
1285 int i;
1286
1287 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
1288 return;
1289
1290 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
1291 status_trig = (void *)trig->data;
1292
1293 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
1294 return;
1295
1296 for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
1297 /* don't collect on status 0 */
1298 if (!status_trig->statuses[i].status)
1299 break;
1300
1301 if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
1302 continue;
1303
1304 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
1305 "Tx status %d was received",
1306 status & TX_STATUS_MSK);
1307 break;
1308 }
1309}
1310
Sara Sharon12db2942017-01-17 14:28:21 +02001311/**
1312 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
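 * @mvm: the mvm object (used to locate the agg status within the response)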
1313 * @tx_resp: the Tx response from the fw (agg or non-agg)
1314 *
1315 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
1316 * it can't know that everything will go well until the end of the AMPDU, it
1317 * can't know in advance the number of MPDUs that will be sent in the current
1318 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
1319 * Hence, it can't know in advance what the SSN of the SCD will be at the end
1320 * of the batch. This is why the SSN of the SCD is written at the end of the
1321 * whole struct at a variable offset. This function knows how to cope with the
1322 * variable offset and returns the SSN of the SCD.
1323 */
1324static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
1325 struct iwl_mvm_tx_resp *tx_resp)
1326{
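	/*
	 * The SCD SSN dword sits right after the frame_count agg_tx_status
	 * entries (hence the pointer arithmetic below); only its low 12 bits
	 * carry the SSN itself.
	 */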
1327 return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
1328 tx_resp->frame_count) & 0xfff;
1329}
1330
Johannes Berg8ca151b2013-01-24 14:25:36 +01001331static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1332 struct iwl_rx_packet *pkt)
1333{
1334 struct ieee80211_sta *sta;
1335 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1336 int txq_id = SEQ_TO_QUEUE(sequence);
Johannes Berga6a62192017-05-03 21:56:04 +02001337 /* struct iwl_mvm_tx_resp_v3 is almost the same */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001338 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1339 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1340 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
Sara Sharon12db2942017-01-17 14:28:21 +02001341 struct agg_tx_status *agg_status =
1342 iwl_mvm_get_agg_status(mvm, tx_resp);
1343 u32 status = le16_to_cpu(agg_status->status);
1344 u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001345 struct iwl_mvm_sta *mvmsta;
1346 struct sk_buff_head skbs;
1347 u8 skb_freed = 0;
Gregory Greenmanea42d1c2017-03-06 11:15:41 +02001348 u8 lq_color;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001349 u16 next_reclaimed, seq_ctl;
Emmanuel Grumbach532beba2016-03-07 22:23:52 +02001350 bool is_ndp = false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001351
1352 __skb_queue_head_init(&skbs);
1353
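	/* fw supporting the new Tx API reports the used queue in the response */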
Sara Sharon12db2942017-01-17 14:28:21 +02001354 if (iwl_mvm_has_new_tx_api(mvm))
Johannes Berga6a62192017-05-03 21:56:04 +02001355 txq_id = le16_to_cpu(tx_resp->tx_queue);
Sara Sharon12db2942017-01-17 14:28:21 +02001356
Johannes Berg8ca151b2013-01-24 14:25:36 +01001357 seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
1358
1359 /* we can free until ssn % q.n_bd not inclusive */
1360 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
1361
1362 while (!skb_queue_empty(&skbs)) {
1363 struct sk_buff *skb = __skb_dequeue(&skbs);
1364 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1365
1366 skb_freed++;
1367
1368 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1369
1370 memset(&info->status, 0, sizeof(info->status));
1371
Johannes Berg8ca151b2013-01-24 14:25:36 +01001372 /* inform mac80211 about what happened with the frame */
1373 switch (status & TX_STATUS_MSK) {
1374 case TX_STATUS_SUCCESS:
1375 case TX_STATUS_DIRECT_DONE:
1376 info->flags |= IEEE80211_TX_STAT_ACK;
1377 break;
1378 case TX_STATUS_FAIL_DEST_PS:
Sara Sharon9a3fcf92017-03-14 09:50:35 +02001379 /* In DQA, the FW should have stopped the queue and not
 1380			 * returned this status
1381 */
1382 WARN_ON(iwl_mvm_is_dqa_supported(mvm));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001383 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1384 break;
1385 default:
1386 break;
1387 }
1388
Golan Ben-Ami25657fe2015-09-02 12:34:23 +03001389 iwl_mvm_tx_status_check_trigger(mvm, status);
1390
Johannes Berg8ca151b2013-01-24 14:25:36 +01001391 info->status.rates[0].count = tx_resp->failure_frame + 1;
Eyal Shapirad310e402013-08-11 18:43:47 +03001392 iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
1393 info);
Eyal Shapira929e6ed2015-01-30 13:40:02 +02001394 info->status.status_driver_data[1] =
1395 (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001396
1397 /* Single frame failure in an AMPDU queue => send BAR */
Sara Sharonc56108b2017-01-01 18:42:23 +02001398 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
Eyal Shapira9ce578a2014-12-31 15:22:38 +02001399 !(info->flags & IEEE80211_TX_STAT_ACK) &&
1400 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001401 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sara Sharonc56108b2017-01-01 18:42:23 +02001402 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001403
Emmanuel Grumbachebea2f32013-06-13 10:07:47 +03001404 /* W/A FW bug: seq_ctl is wrong when the status isn't success */
1405 if (status != TX_STATUS_SUCCESS) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001406 struct ieee80211_hdr *hdr = (void *)skb->data;
1407 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1408 }
1409
Emmanuel Grumbach532beba2016-03-07 22:23:52 +02001410 if (unlikely(!seq_ctl)) {
1411 struct ieee80211_hdr *hdr = (void *)skb->data;
1412
1413 /*
 1414			 * If it is an NDP, we can't update next_reclaimed since
 1415			 * its sequence control is 0. Note that for that same
 1416			 * reason, NDPs are never sent to A-MPDU'able queues, so
 1417			 * we can never have more than one freed frame for a
 1418			 * single Tx response (see the WARN_ON below).
1419 */
1420 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1421 is_ndp = true;
1422 }
1423
Emmanuel Grumbach9b5452f2014-10-07 10:38:53 +03001424 /*
1425 * TODO: this is not accurate if we are freeing more than one
1426 * packet.
1427 */
1428 info->status.tx_time =
1429 le16_to_cpu(tx_resp->wireless_media_time);
Eliad Peller3a84b692014-03-12 15:05:06 +02001430 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
Gregory Greenmanea42d1c2017-03-06 11:15:41 +02001431 lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
Eliad Peller3a84b692014-03-12 15:05:06 +02001432 info->status.status_driver_data[0] =
Gregory Greenmanea42d1c2017-03-06 11:15:41 +02001433 RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
Eliad Peller3a84b692014-03-12 15:05:06 +02001434
Johannes Bergf14d6b32014-03-21 13:30:03 +01001435 ieee80211_tx_status(mvm->hw, skb);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001436 }
1437
Sara Sharonc56108b2017-01-01 18:42:23 +02001438 if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001439 /* If this is an aggregation queue, we use the ssn since:
1440 * ssn = wifi seq_num % 256.
1441 * The seq_ctl is the sequence control of the packet to which
1442 * this Tx response relates. But if there is a hole in the
 1443		 * bitmap of the BA we received, this Tx response may allow us to
 1444		 * reclaim the hole and all the subsequent packets that were
 1445		 * already acked. In that case, seq_ctl != ssn, and the next
 1446		 * packet to be reclaimed will be ssn and not seq_ctl. As a
 1447		 * result, several packets will be reclaimed even if
1448 * frame_count = 1.
1449 *
 1450		 * The ssn is the index (% 256) of the latest packet that has
 1451		 * been treated (acked / dropped), plus 1.
1452 */
1453 next_reclaimed = ssn;
1454 } else {
1455 /* The next packet to be reclaimed is the one after this one */
Johannes Berg9a886582013-02-15 19:25:00 +01001456 next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001457 }
1458
1459 IWL_DEBUG_TX_REPLY(mvm,
Emmanuel Grumbach8c6e83d2013-03-20 17:12:46 +02001460 "TXQ %d status %s (0x%08x)\n",
1461 txq_id, iwl_mvm_get_tx_fail_reason(status), status);
1462
1463 IWL_DEBUG_TX_REPLY(mvm,
1464 "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
1465 le32_to_cpu(tx_resp->initial_rate),
Johannes Berg8ca151b2013-01-24 14:25:36 +01001466 tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
1467 ssn, next_reclaimed, seq_ctl);
1468
1469 rcu_read_lock();
1470
1471 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001472 /*
1473 * sta can't be NULL otherwise it'd mean that the sta has been freed in
1474 * the firmware while we still have packets for it in the Tx queues.
1475 */
1476 if (WARN_ON_ONCE(!sta))
1477 goto out;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001478
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001479 if (!IS_ERR(sta)) {
Johannes Berg5b577a92013-11-14 18:20:04 +01001480 mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001481
Sara Sharonc65f4e02016-12-13 16:10:28 +02001482 if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001483 struct iwl_mvm_tid_data *tid_data =
1484 &mvmsta->tid_data[tid];
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02001485 bool send_eosp_ndp = false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001486
Johannes Berg2bfb5092012-12-27 21:43:48 +01001487 spin_lock_bh(&mvmsta->lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03001488
Emmanuel Grumbach532beba2016-03-07 22:23:52 +02001489 if (!is_ndp) {
1490 tid_data->next_reclaimed = next_reclaimed;
1491 IWL_DEBUG_TX_REPLY(mvm,
1492 "Next reclaimed packet:%d\n",
1493 next_reclaimed);
1494 } else {
1495 IWL_DEBUG_TX_REPLY(mvm,
1496 "NDP - don't update next_reclaimed\n");
1497 }
1498
Johannes Berg8ca151b2013-01-24 14:25:36 +01001499 iwl_mvm_check_ratid_empty(mvm, sta, tid);
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02001500
1501 if (mvmsta->sleep_tx_count) {
1502 mvmsta->sleep_tx_count--;
1503 if (mvmsta->sleep_tx_count &&
Liad Kaufmandd321622017-04-05 16:25:11 +03001504 !iwl_mvm_tid_queued(mvm, tid_data)) {
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02001505 /*
1506 * The number of frames in the queue
 1507				 * dropped to 0 even though we sent fewer
1508 * frames than we thought we had on the
1509 * Tx queue.
1510 * This means we had holes in the BA
1511 * window that we just filled, ask
1512 * mac80211 to send EOSP since the
1513 * firmware won't know how to do that.
1514 * Send NDP and the firmware will send
1515 * EOSP notification that will trigger
1516 * a call to ieee80211_sta_eosp().
1517 */
1518 send_eosp_ndp = true;
1519 }
1520 }
1521
Johannes Berg2bfb5092012-12-27 21:43:48 +01001522 spin_unlock_bh(&mvmsta->lock);
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02001523 if (send_eosp_ndp) {
1524 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
1525 IEEE80211_FRAME_RELEASE_UAPSD,
1526 1, tid, false, false);
1527 mvmsta->sleep_tx_count = 0;
1528 ieee80211_send_eosp_nullfunc(sta, tid);
1529 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001530 }
Johannes Berg3e56ead2013-02-15 22:23:18 +01001531
1532 if (mvmsta->next_status_eosp) {
1533 mvmsta->next_status_eosp = false;
1534 ieee80211_sta_eosp(sta);
1535 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001536 } else {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001537 mvmsta = NULL;
1538 }
1539
1540 /*
1541 * If the txq is not an AMPDU queue, there is no chance we freed
1542 * several skbs. Check that out...
Johannes Berg8ca151b2013-01-24 14:25:36 +01001543 */
Sara Sharon9a3fcf92017-03-14 09:50:35 +02001544 if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001545 goto out;
1546
1547 /* We can't free more than one frame at once on a shared queue */
Sara Sharon9a3fcf92017-03-14 09:50:35 +02001548 WARN_ON(skb_freed > 1);
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001549
Emmanuel Grumbach589a6ba2014-06-05 11:32:41 +03001550	/* If we still have frames for this STA, there is nothing to do here */
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001551 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
1552 goto out;
1553
1554 if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03001555
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001556 /*
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03001557 * If there are no pending frames for this STA and
1558 * the tx to this station is not disabled, notify
1559 * mac80211 that this station can now wake up in its
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001560 * STA table.
1561 * If mvmsta is not NULL, sta is valid.
1562 */
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03001563
1564 spin_lock_bh(&mvmsta->lock);
1565
1566 if (!mvmsta->disable_tx)
1567 ieee80211_sta_block_awake(mvm->hw, sta, false);
1568
1569 spin_unlock_bh(&mvmsta->lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001570 }
1571
Emmanuel Grumbach9bb0c1a2014-01-20 15:21:26 +02001572 if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
1573 /*
1574 * We are draining and this was the last packet - pre_rcu_remove
1575 * has been called already. We might be after the
1576 * synchronize_net already.
1577 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
1578 */
1579 set_bit(sta_id, mvm->sta_drained);
1580 schedule_work(&mvm->sta_drained_wk);
1581 }
1582
1583out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01001584 rcu_read_unlock();
1585}
1586
1587#ifdef CONFIG_IWLWIFI_DEBUG
1588#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
1589static const char *iwl_get_agg_tx_status(u16 status)
1590{
1591 switch (status & AGG_TX_STATE_STATUS_MSK) {
1592 AGG_TX_STATE_(TRANSMITTED);
1593 AGG_TX_STATE_(UNDERRUN);
1594 AGG_TX_STATE_(BT_PRIO);
1595 AGG_TX_STATE_(FEW_BYTES);
1596 AGG_TX_STATE_(ABORT);
1597 AGG_TX_STATE_(LAST_SENT_TTL);
1598 AGG_TX_STATE_(LAST_SENT_TRY_CNT);
1599 AGG_TX_STATE_(LAST_SENT_BT_KILL);
1600 AGG_TX_STATE_(SCD_QUERY);
1601 AGG_TX_STATE_(TEST_BAD_CRC32);
1602 AGG_TX_STATE_(RESPONSE);
1603 AGG_TX_STATE_(DUMP_TX);
1604 AGG_TX_STATE_(DELAY_TX);
1605 }
1606
1607 return "UNKNOWN";
1608}
1609
1610static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1611 struct iwl_rx_packet *pkt)
1612{
1613 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
Sara Sharon12db2942017-01-17 14:28:21 +02001614 struct agg_tx_status *frame_status =
1615 iwl_mvm_get_agg_status(mvm, tx_resp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001616 int i;
1617
1618 for (i = 0; i < tx_resp->frame_count; i++) {
1619 u16 fstatus = le16_to_cpu(frame_status[i].status);
1620
1621 IWL_DEBUG_TX_REPLY(mvm,
1622 "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
1623 iwl_get_agg_tx_status(fstatus),
1624 fstatus & AGG_TX_STATE_STATUS_MSK,
1625 (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
1626 AGG_TX_STATE_TRY_CNT_POS,
1627 le16_to_cpu(frame_status[i].sequence));
1628 }
1629}
1630#else
1631static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1632 struct iwl_rx_packet *pkt)
1633{}
1634#endif /* CONFIG_IWLWIFI_DEBUG */
1635
1636static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1637 struct iwl_rx_packet *pkt)
1638{
1639 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1640 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1641 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1642 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
Sara Sharon13303c02016-04-10 15:51:54 +03001643 struct iwl_mvm_sta *mvmsta;
Liad Kaufmancf961e12015-08-13 19:16:08 +03001644 int queue = SEQ_TO_QUEUE(sequence);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001645
Liad Kaufmancf961e12015-08-13 19:16:08 +03001646 if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
1647 (!iwl_mvm_is_dqa_supported(mvm) ||
1648 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001649 return;
1650
1651 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
1652 return;
1653
1654 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
1655
1656 rcu_read_lock();
1657
Sara Sharon13303c02016-04-10 15:51:54 +03001658 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001659
Sara Sharon13303c02016-04-10 15:51:54 +03001660 if (!WARN_ON_ONCE(!mvmsta)) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001661 mvmsta->tid_data[tid].rate_n_flags =
1662 le32_to_cpu(tx_resp->initial_rate);
Emmanuel Grumbach9b5452f2014-10-07 10:38:53 +03001663 mvmsta->tid_data[tid].tx_time =
1664 le16_to_cpu(tx_resp->wireless_media_time);
Gregory Greenmanea42d1c2017-03-06 11:15:41 +02001665 mvmsta->tid_data[tid].lq_color =
1666 (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
1667 TX_RES_RATE_TABLE_COLOR_POS;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001668 }
1669
1670 rcu_read_unlock();
1671}
1672
Johannes Berg04168412015-06-23 21:22:09 +02001673void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001674{
1675 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1676 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1677
1678 if (tx_resp->frame_count == 1)
1679 iwl_mvm_rx_tx_cmd_single(mvm, pkt);
1680 else
1681 iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001682}
1683
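/*
 * Reclaim all frames on the given queue up to the given index (the start
 * of the BA window) and pass them, with the BA rate info attached, back
 * to mac80211.
 */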
Sara Sharonc46e7722016-07-17 14:24:55 +03001684static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
1685 int txq, int index,
1686 struct ieee80211_tx_info *ba_info, u32 rate)
Eyal Shapiraa7130442014-09-14 15:28:09 +03001687{
Johannes Berg8ca151b2013-01-24 14:25:36 +01001688 struct sk_buff_head reclaimed_skbs;
1689 struct iwl_mvm_tid_data *tid_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001690 struct ieee80211_sta *sta;
1691 struct iwl_mvm_sta *mvmsta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001692 struct sk_buff *skb;
Sara Sharonc46e7722016-07-17 14:24:55 +03001693 int freed;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001694
Eyal Shapira2cee4762015-01-16 11:09:30 +02001695 if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
1696 tid >= IWL_MAX_TID_COUNT,
1697 "sta_id %d tid %d", sta_id, tid))
Johannes Berg04168412015-06-23 21:22:09 +02001698 return;
Eyal Shapira2cee4762015-01-16 11:09:30 +02001699
Johannes Berg8ca151b2013-01-24 14:25:36 +01001700 rcu_read_lock();
1701
1702 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1703
 1704	/* Reclaiming frames for a station that has been deleted? */
1705 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
1706 rcu_read_unlock();
Johannes Berg04168412015-06-23 21:22:09 +02001707 return;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001708 }
1709
Johannes Berg5b577a92013-11-14 18:20:04 +01001710 mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001711 tid_data = &mvmsta->tid_data[tid];
1712
Sara Sharonc46e7722016-07-17 14:24:55 +03001713 if (tid_data->txq_id != txq) {
Johannes Berg1f16ea22015-03-06 09:17:37 +01001714 IWL_ERR(mvm,
Sara Sharonc46e7722016-07-17 14:24:55 +03001715 "invalid BA notification: Q %d, tid %d\n",
1716 tid_data->txq_id, tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001717 rcu_read_unlock();
Johannes Berg04168412015-06-23 21:22:09 +02001718 return;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001719 }
1720
Johannes Berg2bfb5092012-12-27 21:43:48 +01001721 spin_lock_bh(&mvmsta->lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001722
1723 __skb_queue_head_init(&reclaimed_skbs);
1724
1725 /*
1726 * Release all TFDs before the SSN, i.e. all TFDs in front of
1727 * block-ack window (we assume that they've been successfully
1728 * transmitted ... if not, it's too late anyway).
1729 */
Sara Sharonc46e7722016-07-17 14:24:55 +03001730 iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001731
Sara Sharonc46e7722016-07-17 14:24:55 +03001732 tid_data->next_reclaimed = index;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001733
1734 iwl_mvm_check_ratid_empty(mvm, sta, tid);
1735
1736 freed = 0;
Gregory Greenmanea42d1c2017-03-06 11:15:41 +02001737
1738 /* pack lq color from tid_data along the reduced txp */
1739 ba_info->status.status_driver_data[0] =
1740 RS_DRV_DATA_PACK(tid_data->lq_color,
1741 ba_info->status.status_driver_data[0]);
Sara Sharonc46e7722016-07-17 14:24:55 +03001742 ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001743
1744 skb_queue_walk(&reclaimed_skbs, skb) {
Johannes Berg143582c2014-02-25 10:37:15 +01001745 struct ieee80211_hdr *hdr = (void *)skb->data;
1746 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001747
1748 if (ieee80211_is_data_qos(hdr->frame_control))
1749 freed++;
1750 else
1751 WARN_ON_ONCE(1);
1752
Johannes Berg8ca151b2013-01-24 14:25:36 +01001753 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1754
Johannes Berg143582c2014-02-25 10:37:15 +01001755 memset(&info->status, 0, sizeof(info->status));
1756 /* Packet was transmitted successfully, failures come as single
1757 * frames because before failing a frame the firmware transmits
1758 * it without aggregation at least once.
1759 */
1760 info->flags |= IEEE80211_TX_STAT_ACK;
1761
Eyal Shapiraa7130442014-09-14 15:28:09 +03001762 /* this is the first skb we deliver in this batch */
1763 /* put the rate scaling data there */
Sara Sharonc46e7722016-07-17 14:24:55 +03001764 if (freed == 1) {
1765 info->flags |= IEEE80211_TX_STAT_AMPDU;
1766 memcpy(&info->status, &ba_info->status,
1767 sizeof(ba_info->status));
1768 iwl_mvm_hwrate_to_tx_status(rate, info);
1769 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001770 }
1771
Johannes Berg2bfb5092012-12-27 21:43:48 +01001772 spin_unlock_bh(&mvmsta->lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001773
Eyal Shapiraa7130442014-09-14 15:28:09 +03001774	/* We got a BA notif with 0 acked frames, or scd_ssn didn't progress,
 1775	 * which is possible (i.e. the first MPDU in the aggregation wasn't
 1776	 * acked). Still, it's important to update RS about sent vs. acked.
1777 */
1778 if (skb_queue_empty(&reclaimed_skbs)) {
Eyal Shapiraa7130442014-09-14 15:28:09 +03001779 struct ieee80211_chanctx_conf *chanctx_conf = NULL;
1780
1781 if (mvmsta->vif)
1782 chanctx_conf =
1783 rcu_dereference(mvmsta->vif->chanctx_conf);
1784
1785 if (WARN_ON_ONCE(!chanctx_conf))
1786 goto out;
1787
Sara Sharonc46e7722016-07-17 14:24:55 +03001788 ba_info->band = chanctx_conf->def.chan->band;
1789 iwl_mvm_hwrate_to_tx_status(rate, ba_info);
Eyal Shapiraa7130442014-09-14 15:28:09 +03001790
1791 IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
Sara Sharonc46e7722016-07-17 14:24:55 +03001792 iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
Eyal Shapiraa7130442014-09-14 15:28:09 +03001793 }
1794
1795out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01001796 rcu_read_unlock();
1797
1798 while (!skb_queue_empty(&reclaimed_skbs)) {
1799 skb = __skb_dequeue(&reclaimed_skbs);
Johannes Bergf14d6b32014-03-21 13:30:03 +01001800 ieee80211_tx_status(mvm->hw, skb);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001801 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001802}
1803
Sara Sharonc46e7722016-07-17 14:24:55 +03001804void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1805{
1806 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1807 int sta_id, tid, txq, index;
1808 struct ieee80211_tx_info ba_info = {};
1809 struct iwl_mvm_ba_notif *ba_notif;
1810 struct iwl_mvm_tid_data *tid_data;
1811 struct iwl_mvm_sta *mvmsta;
1812
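	/*
	 * With the new Tx API the fw sends a compressed BA notification;
	 * parse it and reclaim here, the legacy format is handled below.
	 */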
1813 if (iwl_mvm_has_new_tx_api(mvm)) {
1814 struct iwl_mvm_compressed_ba_notif *ba_res =
1815 (void *)pkt->data;
1816
1817 sta_id = ba_res->sta_id;
1818 ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
1819 ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
1820 ba_info.status.tx_time =
1821 (u16)le32_to_cpu(ba_res->wireless_time);
1822 ba_info.status.status_driver_data[0] =
1823 (void *)(uintptr_t)ba_res->reduced_txp;
1824
Sara Sharon9b1ea162017-02-19 17:00:58 +02001825 if (!le16_to_cpu(ba_res->tfd_cnt))
1826 goto out;
1827
Sara Sharonc46e7722016-07-17 14:24:55 +03001828 /*
1829 * TODO:
1830 * When supporting multi TID aggregations - we need to move
1831 * next_reclaimed to be per TXQ and not per TID or handle it
1832 * in a different way.
1833 * This will go together with SN and AddBA offload and cannot
1834 * be handled properly for now.
1835 */
Sara Sharonc65f4e02016-12-13 16:10:28 +02001836 WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
1837 tid = ba_res->ra_tid[0].tid;
1838 if (tid == IWL_MGMT_TID)
1839 tid = IWL_MAX_TID_COUNT;
1840 iwl_mvm_tx_reclaim(mvm, sta_id, tid,
Sara Sharon12db2942017-01-17 14:28:21 +02001841 (int)(le16_to_cpu(ba_res->tfd[0].q_num)),
Sara Sharonc46e7722016-07-17 14:24:55 +03001842 le16_to_cpu(ba_res->tfd[0].tfd_index),
1843 &ba_info, le32_to_cpu(ba_res->tx_rate));
1844
Sara Sharon9b1ea162017-02-19 17:00:58 +02001845out:
Sara Sharonc46e7722016-07-17 14:24:55 +03001846 IWL_DEBUG_TX_REPLY(mvm,
1847 "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
1848 sta_id, le32_to_cpu(ba_res->flags),
1849 le16_to_cpu(ba_res->txed),
1850 le16_to_cpu(ba_res->done));
1851 return;
1852 }
1853
1854 ba_notif = (void *)pkt->data;
1855 sta_id = ba_notif->sta_id;
1856 tid = ba_notif->tid;
1857 /* "flow" corresponds to Tx queue */
1858 txq = le16_to_cpu(ba_notif->scd_flow);
1859 /* "ssn" is start of block-ack Tx window, corresponds to index
1860 * (in Tx queue's circular buffer) of first TFD/frame in window */
1861 index = le16_to_cpu(ba_notif->scd_ssn);
1862
1863 rcu_read_lock();
1864 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1865 if (WARN_ON_ONCE(!mvmsta)) {
1866 rcu_read_unlock();
1867 return;
1868 }
1869
1870 tid_data = &mvmsta->tid_data[tid];
1871
1872 ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
1873 ba_info.status.ampdu_len = ba_notif->txed;
1874 ba_info.status.tx_time = tid_data->tx_time;
1875 ba_info.status.status_driver_data[0] =
1876 (void *)(uintptr_t)ba_notif->reduced_txp;
1877
1878 rcu_read_unlock();
1879
1880 iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
1881 tid_data->rate_n_flags);
1882
1883 IWL_DEBUG_TX_REPLY(mvm,
1884 "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
Johannes Berg3dc6dd92017-03-09 14:08:51 +01001885 ba_notif->sta_addr, ba_notif->sta_id);
Sara Sharonc46e7722016-07-17 14:24:55 +03001886
1887 IWL_DEBUG_TX_REPLY(mvm,
1888 "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
1889 ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
1890 le64_to_cpu(ba_notif->bitmap), txq, index,
1891 ba_notif->txed, ba_notif->txed_2_done);
1892
1893 IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
1894 ba_notif->reduced_txp);
1895}
1896
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001897/*
1898 * Note that there are transports that buffer frames before they reach
1899 * the firmware. This means that after flush_tx_path is called, the
1900 * queue might not be empty. The race-free way to handle this is to:
1901 * 1) set the station as draining
1902 * 2) flush the Tx path
1903 * 3) wait for the transport queues to be empty
1904 */
Luca Coelho5888a402015-10-06 09:54:57 +03001905int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001906{
1907 int ret;
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001908 struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001909 .queues_ctl = cpu_to_le32(tfd_msk),
1910 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
1911 };
1912
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001913 WARN_ON(iwl_mvm_has_new_tx_api(mvm));
1914
1915 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
1916 sizeof(flush_cmd), &flush_cmd);
1917 if (ret)
1918 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
1919 return ret;
1920}
1921
1922int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
1923 u16 tids, u32 flags)
1924{
1925 int ret;
1926 struct iwl_tx_path_flush_cmd flush_cmd = {
1927 .sta_id = cpu_to_le32(sta_id),
1928 .tid_mask = cpu_to_le16(tids),
1929 };
1930
1931 WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
1932
Johannes Berg8ca151b2013-01-24 14:25:36 +01001933 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
1934 sizeof(flush_cmd), &flush_cmd);
1935 if (ret)
1936 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
1937 return ret;
1938}
Sara Sharond49394a2017-03-05 13:01:08 +02001939
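/*
 * Flush all pending frames for a station: with the new Tx API this is
 * done per sta_id/TID mask, otherwise via the station's TFD queue mask.
 */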
Johannes Berga9c50722017-04-19 11:14:28 +02001940int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
Sara Sharond49394a2017-03-05 13:01:08 +02001941{
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001942 struct iwl_mvm_int_sta *int_sta = sta;
1943 struct iwl_mvm_sta *mvm_sta = sta;
Sara Sharond49394a2017-03-05 13:01:08 +02001944
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001945 if (iwl_mvm_has_new_tx_api(mvm)) {
1946 if (internal)
1947 return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id,
1948 BIT(IWL_MGMT_TID), flags);
Sara Sharond49394a2017-03-05 13:01:08 +02001949
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001950 return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
1951 0xFF, flags);
Sara Sharond49394a2017-03-05 13:01:08 +02001952 }
1953
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001954 if (internal)
1955 return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
1956 flags);
1957
1958 return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
Sara Sharond49394a2017-03-05 13:01:08 +02001959}