/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"
#include "fw-dbg.h"

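/*
 * Collect firmware debug data when a BAR is transmitted on a TID that the
 * BA debug trigger is configured to monitor.
 */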
static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "BAR sent to %pM, tid %d, ssn %d",
				    addr, tid, ssn);
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_tx_info *info,
			    struct iwl_tx_cmd *tx_cmd)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		return;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		return;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				return;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		return;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/* mac header len should include IV, size is in words */
	if (info->control.hw_key)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

	tx_cmd->offload_assist = cpu_to_le16(offload_assist);
#endif
}

/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			tx_cmd->offload_assist |=
				cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len +
		(uintptr_t)skb_info->driver_data[0]);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;

	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
		tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));

	iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
}

/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			     struct ieee80211_tx_info *info,
			     struct ieee80211_sta *sta, __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rate_plcp;

	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * for data packets, rate info comes from the table inside the fw. This
	 * table is controlled by LINK_QUALITY commands
	 */

	if (ieee80211_is_data(fc) && sta) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
		return;
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx,
		  le16_to_cpu(fc));

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For 2.4 GHZ band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
				     mvm->mgmt_last_antenna_idx);

	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
	else
		rate_flags =
			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
}

static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
					 u8 *crypto_hdr)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u64 pn;

	pn = atomic64_inc_return(&keyconf->tx_pn);
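	/*
	 * Lay the 48-bit PN out in the CCMP/GCMP IV: PN0/PN1 in bytes 0-1,
	 * the Ext IV flag (0x20) and key index in byte 3, PN2-PN5 in
	 * bytes 4-7.
	 */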
	crypto_hdr[0] = pn;
	crypto_hdr[2] = 0;
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
	crypto_hdr[1] = pn >> 8;
	crypto_hdr[4] = pn >> 16;
	crypto_hdr[5] = pn >> 24;
	crypto_hdr[6] = pn >> 32;
	crypto_hdr[7] = pn >> 40;
}

/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			 TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		/* TODO: Taking the key from the table might introduce a race
		 * when PTK rekeying is done, having old packets with a PN
		 * based on the old key but the message encrypted with a new
		 * one.
		 * Need to handle this.
		 */
		tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}

/*
 * Allocates the Tx cmd and sets the driver data pointers in the skb
 */
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = TX_CMD;
	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	memset(&skb_info->status, 0, sizeof(skb_info->status));
	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

	skb_info->driver_data[1] = dev_cmd;

	return dev_cmd;
}

static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info, __le16 fc)
{
	if (iwl_mvm_is_dqa_supported(mvm)) {
		if (info->control.vif->type == NL80211_IFTYPE_AP &&
		    ieee80211_is_probe_resp(fc))
			return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if (ieee80211_is_mgmt(fc) &&
			 info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
			return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
	}

	return info->hw_queue;
}

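/*
 * Transmit a frame without an ieee80211_sta entry: pick the aux, broadcast
 * or AP station id and the matching queue based on the interface type.
 */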
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_info info;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	int queue;

	memcpy(&info, skb->cb, sizeof(info));

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
			 (!info.control.vif ||
			  info.hw_queue != info.control.vif->cab_queue)))
		return -1;

	/* This holds the amsdu headers length */
	skb_info->driver_data[0] = (void *)(uintptr_t)0;

	/*
	 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
	 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
	 * and hence needs to be sent on the aux queue
	 */
	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	    info.control.vif->type == NL80211_IFTYPE_STATION)
		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;

	queue = info.hw_queue;

	/*
	 * If the interface on which the frame is sent is the P2P_DEVICE
	 * or an AP/GO interface use the broadcast station associated
	 * with it; otherwise if the interface is a managed interface
	 * use the AP station associated with it for multicast traffic
	 * (this is not possible for unicast packets as a TDLS discovery
	 * response is sent without a station entry); otherwise use the
	 * AUX station.
	 * In DQA mode, if vif is of type STATION and frames are not multicast,
	 * they should be sent from the BSS queue. For example, TDLS setup
	 * frames should be sent on this queue, as they go through the AP.
	 */
	sta_id = mvm->aux_sta.sta_id;
	if (info.control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);

		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info.control.vif->type == NL80211_IFTYPE_AP) {
			sta_id = mvmvif->bcast_sta.sta_id;
			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
							   hdr->frame_control);
		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
			   is_multicast_ether_addr(hdr->addr1)) {
			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);

			if (ap_sta_id != IWL_MVM_STATION_COUNT)
				sta_id = ap_sta_id;
		} else if (iwl_mvm_is_dqa_supported(mvm) &&
			   info.control.vif->type == NL80211_IFTYPE_STATION) {
			queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
		}
	}

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	/*
	 * Increase the pending frames counter, so that later when a reply comes
	 * in and the counter is decreased - we don't start getting negative
	 * values.
	 * Note that we don't need to make sure it isn't agg'd, since we're
	 * TXing non-sta
	 */
	atomic_inc(&mvm->pending_frames[sta_id]);

	return 0;
}

#ifdef CONFIG_INET
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	struct sk_buff *tmp, *next;
	char cb[sizeof(skb->cb)];
	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
	u16 amsdu_add, snap_ip_tcp, pad, i = 0;
	unsigned int dbg_max_amsdu_len;
	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
	u8 *qc, tid, txf;

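	/* length of the SNAP header (8 bytes) plus the IP and TCP headers */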
	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);

	if (!sta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
	    (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	/*
	 * Do not build AMSDU for IPv6 with extension headers.
	 * Ask the stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		num_subframes = 1;
		pad = 0;
		netdev_features &= ~NETIF_F_CSUM_MASK;
		goto segment;
	}

	/*
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during a BA session.
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	max_amsdu_len = sta->max_amsdu_len;

	/* the Tx FIFO to which this A-MSDU will be routed */
	txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	max_amsdu_len = min_t(unsigned int, max_amsdu_len,
			      mvm->shared_mem_cfg.txfifo_size[txf] - 256);

	if (unlikely(dbg_max_amsdu_len))
		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
				      dbg_max_amsdu_len);

	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
	 * supported. This is a spec requirement (IEEE 802.11-2015
	 * section 8.7.3 NOTE 3).
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !sta->vht_cap.vht_supported)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;
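	/* each subframe is padded to a 4-byte boundary; the last one is not */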

	/*
	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
	if (num_subframes > 1)
		*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/*
	 * Make sure we have enough TBs for the A-MSDU:
	 *	2 for each subframe
	 *	1 more for each fragment
	 *	1 more for the potential data in the header
	 */
	num_subframes =
		min_t(unsigned int, num_subframes,
		      (mvm->trans->max_skb_frags - 1 -
		       skb_shinfo(skb)->nr_frags) / 2);

	/* This skb fits in one single A-MSDU */
	if (num_subframes * mss >= tcp_payload_len) {
		struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
		 * ETH header for n subframes. Note that the original skb
		 * already had one set of SNAP / IP / TCP headers.
		 */
		num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
		amsdu_add = num_subframes * sizeof(struct ethhdr) +
			(num_subframes - 1) * (snap_ip_tcp + pad);
		/* This holds the amsdu headers length */
		skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;

		__skb_queue_tail(mpdus_skb, skb);
		return 0;
	}

	/*
	 * Trick the segmentation function to make it
	 * create SKBs that can fit into one A-MSDU.
	 */
segment:
	skb_shinfo(skb)->gso_size = num_subframes * mss;
	memcpy(cb, skb->cb, sizeof(cb));

	next = skb_gso_segment(skb, netdev_features);
	skb_shinfo(skb)->gso_size = mss;
	if (WARN_ON_ONCE(IS_ERR(next)))
		return -EINVAL;
	else if (next)
		consume_skb(skb);

	while (next) {
		tmp = next;
		next = tmp->next;

		memcpy(tmp->cb, cb, sizeof(tmp->cb));
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
		 * ETH header for n subframes.
		 */
		tcp_payload_len = skb_tail_pointer(tmp) -
			skb_transport_header(tmp) -
			tcp_hdrlen(tmp) + tmp->data_len;

		if (ipv4)
			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

		if (tcp_payload_len > mss) {
			struct ieee80211_tx_info *skb_info =
				IEEE80211_SKB_CB(tmp);

			num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
			amsdu_add = num_subframes * sizeof(struct ethhdr) +
				(num_subframes - 1) * (snap_ip_tcp + pad);
			skb_info->driver_data[0] =
				(void *)(uintptr_t)amsdu_add;
			skb_shinfo(tmp)->gso_size = mss;
		} else {
			qc = ieee80211_get_qos_ctl((void *)tmp->data);

			if (ipv4)
				ip_send_check(ip_hdr(tmp));
			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
			skb_shinfo(tmp)->gso_size = 0;
		}

		tmp->prev = NULL;
		tmp->next = NULL;

		__skb_queue_tail(mpdus_skb, tmp);
		i++;
	}

	return 0;
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif

static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta, u8 tid,
				  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 mac_queue = info->hw_queue;
	struct sk_buff_head *deferred_tx_frames;

	lockdep_assert_held(&mvm_sta->lock);

	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);

	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;

	skb_queue_tail(deferred_tx_frames, skb);

	/*
	 * The first deferred frame should've stopped the MAC queues, so we
	 * should never get a second deferred frame for the RA/TID.
	 */
	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
		  skb_queue_len(deferred_tx_frames))) {
		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
		schedule_work(&mvm->add_stream_wk);
	}
}

/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
	unsigned long now = jiffies;
	int tid;

	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
			return true;
	}

	return false;
}

Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 857 | /* |
| 858 | * Sets the fields in the Tx cmd that are crypto related |
| 859 | */ |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 860 | static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, |
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 861 | struct ieee80211_tx_info *info, |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 862 | struct ieee80211_sta *sta) |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 863 | { |
| 864 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 865 | struct iwl_mvm_sta *mvmsta; |
| 866 | struct iwl_device_cmd *dev_cmd; |
| 867 | struct iwl_tx_cmd *tx_cmd; |
| 868 | __le16 fc; |
| 869 | u16 seq_number = 0; |
| 870 | u8 tid = IWL_MAX_TID_COUNT; |
| 871 | u8 txq_id = info->hw_queue; |
Johannes Berg | 7ec5471 | 2016-03-16 09:29:48 +0100 | [diff] [blame] | 872 | bool is_ampdu = false; |
Johannes Berg | ca8c0f4 | 2015-04-20 17:54:54 +0200 | [diff] [blame] | 873 | int hdrlen; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 874 | |
Johannes Berg | 5b577a9 | 2013-11-14 18:20:04 +0100 | [diff] [blame] | 875 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 876 | fc = hdr->frame_control; |
Johannes Berg | ca8c0f4 | 2015-04-20 17:54:54 +0200 | [diff] [blame] | 877 | hdrlen = ieee80211_hdrlen(fc); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 878 | |
| 879 | if (WARN_ON_ONCE(!mvmsta)) |
| 880 | return -1; |
| 881 | |
Emmanuel Grumbach | 881acd8 | 2013-03-19 16:16:00 +0200 | [diff] [blame] | 882 | if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 883 | return -1; |
| 884 | |
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 885 | dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, |
| 886 | sta, mvmsta->sta_id); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 887 | if (!dev_cmd) |
| 888 | goto drop; |
| 889 | |
| 890 | tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; |
| 891 | /* From now on, we cannot access info->control */ |
| 892 | |
Johannes Berg | 3e56ead | 2013-02-15 22:23:18 +0100 | [diff] [blame] | 893 | /* |
| 894 | * we handle that entirely ourselves -- for uAPSD the firmware |
| 895 | * will always send a notification, and for PS-Poll responses |
| 896 | * we'll notify mac80211 when getting frame status |
| 897 | */ |
| 898 | info->flags &= ~IEEE80211_TX_STATUS_EOSP; |
| 899 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 900 | spin_lock(&mvmsta->lock); |
| 901 | |
| 902 | if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { |
| 903 | u8 *qc = NULL; |
| 904 | qc = ieee80211_get_qos_ctl(hdr); |
| 905 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; |
| 906 | if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) |
| 907 | goto drop_unlock_sta; |
| 908 | |
| 909 | seq_number = mvmsta->tid_data[tid].seq_number; |
| 910 | seq_number &= IEEE80211_SCTL_SEQ; |
| 911 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); |
| 912 | hdr->seq_ctrl |= cpu_to_le16(seq_number); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 913 | is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; |
Liad Kaufman | 24afba7 | 2015-07-28 18:56:08 +0300 | [diff] [blame] | 914 | } else if (iwl_mvm_is_dqa_supported(mvm) && |
| 915 | (ieee80211_is_qos_nullfunc(fc) || |
| 916 | ieee80211_is_nullfunc(fc))) { |
| 917 | /* |
| 918 | * nullfunc frames should go to the MGMT queue regardless of QOS |
| 919 | */ |
| 920 | tid = IWL_MAX_TID_COUNT; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 921 | } |
| 922 | |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 923 | if (iwl_mvm_is_dqa_supported(mvm)) |
| 924 | txq_id = mvmsta->tid_data[tid].txq_id; |
| 925 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 926 | /* Copy MAC header from skb into command buffer */ |
Johannes Berg | ca8c0f4 | 2015-04-20 17:54:54 +0200 | [diff] [blame] | 927 | memcpy(tx_cmd->hdr, hdr, hdrlen); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 928 | |
| 929 | WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); |
| 930 | |
Liad Kaufman | e3118ad | 2016-06-05 10:49:02 +0300 | [diff] [blame] | 931 | if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { |
Arik Nemtsov | a0f6bf2 | 2014-09-21 19:10:04 +0300 | [diff] [blame] | 932 | /* default to TID 0 for non-QoS packets */ |
| 933 | u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; |
| 934 | |
| 935 | txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; |
| 936 | } |
| 937 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 938 | if (is_ampdu) { |
| 939 | if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON)) |
| 940 | goto drop_unlock_sta; |
| 941 | txq_id = mvmsta->tid_data[tid].txq_id; |
| 942 | } |
| 943 | |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 944 | /* Check if TXQ needs to be allocated or re-activated */ |
| 945 | if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE || |
| 946 | !mvmsta->tid_data[tid].is_tid_active) && |
| 947 | iwl_mvm_is_dqa_supported(mvm)) { |
| 948 | /* If TXQ needs to be allocated... */ |
| 949 | if (txq_id == IEEE80211_INVAL_HW_QUEUE) { |
Liad Kaufman | 24afba7 | 2015-07-28 18:56:08 +0300 | [diff] [blame] | 950 | iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); |
| 951 | |
| 952 | /* |
| 953 | * The frame is now deferred, and the worker scheduled |
| 954 | * will re-allocate it, so we can free it for now. |
| 955 | */ |
| 956 | iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); |
| 957 | spin_unlock(&mvmsta->lock); |
| 958 | return 0; |
| 959 | } |
| 960 | |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 961 | /* If we are here - TXQ exists and needs to be re-activated */ |
| 962 | spin_lock(&mvm->queue_info_lock); |
| 963 | mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; |
| 964 | mvmsta->tid_data[tid].is_tid_active = true; |
| 965 | spin_unlock(&mvm->queue_info_lock); |
| 966 | |
| 967 | IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n", |
| 968 | txq_id); |
Liad Kaufman | 24afba7 | 2015-07-28 18:56:08 +0300 | [diff] [blame] | 969 | } |
| 970 | |
Liad Kaufman | 9f9af3d | 2015-12-23 16:03:46 +0200 | [diff] [blame] | 971 | if (iwl_mvm_is_dqa_supported(mvm)) { |
| 972 | /* Keep track of the time of the last frame for this RA/TID */ |
| 973 | mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; |
| 974 | |
| 975 | /* |
| 976 | * If we have timed-out TIDs - schedule the worker that will |
| 977 | * reconfig the queues and update them |
| 978 | * |
| 979 | * Note that the mvm->queue_info_lock isn't being taken here in |
| 980 | * order to not serialize the TX flow. This isn't dangerous |
| 981 | * because scheduling mvm->add_stream_wk can't ruin the state, |
| 982 | * and if we DON'T schedule it due to some race condition then |
| 983 | * next TX we get here we will. |
| 984 | */ |
| 985 | if (unlikely(mvm->queue_info[txq_id].status == |
| 986 | IWL_MVM_QUEUE_SHARED && |
| 987 | iwl_mvm_txq_should_update(mvm, txq_id))) |
| 988 | schedule_work(&mvm->add_stream_wk); |
| 989 | } |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 990 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 991 | IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, |
Eliad Peller | cf9d118 | 2013-12-31 18:54:06 +0200 | [diff] [blame] | 992 | tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 993 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 994 | if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) |
| 995 | goto drop_unlock_sta; |
| 996 | |
Johannes Berg | 7ec5471 | 2016-03-16 09:29:48 +0100 | [diff] [blame] | 997 | if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc)) |
Eliad Peller | cf9d118 | 2013-12-31 18:54:06 +0200 | [diff] [blame] | 998 | mvmsta->tid_data[tid].seq_number = seq_number + 0x10; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 999 | |
| 1000 | spin_unlock(&mvmsta->lock); |
| 1001 | |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 1002 | /* Increase pending frames count if this isn't AMPDU */ |
| 1003 | if (!is_ampdu) |
Emmanuel Grumbach | e3d4bc8 | 2013-05-07 14:08:24 +0300 | [diff] [blame] | 1004 | atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1005 | |
| 1006 | return 0; |
| 1007 | |
| 1008 | drop_unlock_sta: |
| 1009 | iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); |
| 1010 | spin_unlock(&mvmsta->lock); |
| 1011 | drop: |
| 1012 | return -1; |
| 1013 | } |
| 1014 | |
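/*
 * iwl_mvm_tx_skb - Tx entry point for frames destined to a known station.
 *
 * If the skb is not GSO, or its TCP payload fits into a single segment, it is
 * sent as one MPDU via iwl_mvm_tx_mpdu(). Otherwise it is segmented into
 * MPDU-sized skbs by iwl_mvm_tx_tso() and each segment is sent separately.
 */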
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 1015 | int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, |
| 1016 | struct ieee80211_sta *sta) |
| 1017 | { |
| 1018 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 1019 | struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); |
| 1020 | struct ieee80211_tx_info info; |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 1021 | struct sk_buff_head mpdus_skbs; |
| 1022 | unsigned int payload_len; |
| 1023 | int ret; |
| 1024 | |
| 1025 | if (WARN_ON_ONCE(!mvmsta)) |
| 1026 | return -1; |
| 1027 | |
| 1028 | if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) |
| 1029 | return -1; |
| 1030 | |
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 1031 | memcpy(&info, skb->cb, sizeof(info)); |
| 1032 | |
Emmanuel Grumbach | a6d5e32 | 2015-10-14 16:28:52 +0300 | [diff] [blame] | 1033 | 	/* This holds the A-MSDU headers' length */
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 1034 | skb_info->driver_data[0] = (void *)(uintptr_t)0; |
Emmanuel Grumbach | a6d5e32 | 2015-10-14 16:28:52 +0300 | [diff] [blame] | 1035 | |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 1036 | if (!skb_is_gso(skb)) |
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 1037 | return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 1038 | |
| 1039 | payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - |
| 1040 | tcp_hdrlen(skb) + skb->data_len; |
| 1041 | |
| 1042 | if (payload_len <= skb_shinfo(skb)->gso_size) |
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 1043 | return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 1044 | |
| 1045 | __skb_queue_head_init(&mpdus_skbs); |
| 1046 | |
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 1047 | ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 1048 | if (ret) |
| 1049 | return ret; |
| 1050 | |
| 1051 | if (WARN_ON(skb_queue_empty(&mpdus_skbs))) |
| 1052 | return ret; |
| 1053 | |
| 1054 | while (!skb_queue_empty(&mpdus_skbs)) { |
Emmanuel Grumbach | a6d5e32 | 2015-10-14 16:28:52 +0300 | [diff] [blame] | 1055 | skb = __skb_dequeue(&mpdus_skbs); |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 1056 | |
Emmanuel Grumbach | 5c08b0f | 2016-05-03 12:08:43 +0300 | [diff] [blame] | 1057 | ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); |
Emmanuel Grumbach | a3713f8 | 2015-10-14 14:16:35 +0300 | [diff] [blame] | 1058 | if (ret) { |
| 1059 | __skb_queue_purge(&mpdus_skbs); |
| 1060 | return ret; |
| 1061 | } |
| 1062 | } |
| 1063 | |
| 1064 | return 0; |
| 1065 | } |
| 1066 | |
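/*
 * iwl_mvm_check_ratid_empty - handle a RA/TID Tx queue becoming empty.
 *
 * Called with mvmsta->lock held. When no frames remain queued for this
 * RA/TID, mac80211 is told the station has nothing buffered on this TID
 * (for the TIM bitmap). Once ssn catches up with next_reclaimed, a pending
 * ADDBA flow is allowed to continue, or a pending DELBA flow is completed.
 */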
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1067 | static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, |
| 1068 | struct ieee80211_sta *sta, u8 tid) |
| 1069 | { |
Johannes Berg | 5b577a9 | 2013-11-14 18:20:04 +0100 | [diff] [blame] | 1070 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1071 | struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; |
| 1072 | struct ieee80211_vif *vif = mvmsta->vif; |
| 1073 | |
| 1074 | lockdep_assert_held(&mvmsta->lock); |
| 1075 | |
Johannes Berg | 3e56ead | 2013-02-15 22:23:18 +0100 | [diff] [blame] | 1076 | if ((tid_data->state == IWL_AGG_ON || |
| 1077 | tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && |
| 1078 | iwl_mvm_tid_queued(tid_data) == 0) { |
 | 1079 | 		/*
 | 1080 | 		 * Now that this aggregation queue is empty, tell mac80211 so it
 | 1081 | 		 * knows we no longer have frames buffered for the station on
 | 1082 | 		 * this TID (for the TIM bitmap calculation).
 | 1083 | 		 */
| 1084 | ieee80211_sta_set_buffered(sta, tid, false); |
| 1085 | } |
| 1086 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1087 | if (tid_data->ssn != tid_data->next_reclaimed) |
| 1088 | return; |
| 1089 | |
| 1090 | switch (tid_data->state) { |
| 1091 | case IWL_EMPTYING_HW_QUEUE_ADDBA: |
| 1092 | IWL_DEBUG_TX_QUEUES(mvm, |
| 1093 | "Can continue addBA flow ssn = next_recl = %d\n", |
| 1094 | tid_data->next_reclaimed); |
| 1095 | tid_data->state = IWL_AGG_STARTING; |
| 1096 | ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); |
| 1097 | break; |
| 1098 | |
| 1099 | case IWL_EMPTYING_HW_QUEUE_DELBA: |
| 1100 | IWL_DEBUG_TX_QUEUES(mvm, |
| 1101 | "Can continue DELBA flow ssn = next_recl = %d\n", |
| 1102 | tid_data->next_reclaimed); |
Liad Kaufman | 15985fb | 2016-06-26 14:45:12 +0300 | [diff] [blame] | 1103 | if (!iwl_mvm_is_dqa_supported(mvm)) { |
| 1104 | u8 mac80211_ac = tid_to_mac80211_ac[tid]; |
| 1105 | |
| 1106 | iwl_mvm_disable_txq(mvm, tid_data->txq_id, |
| 1107 | vif->hw_queue[mac80211_ac], tid, |
| 1108 | CMD_ASYNC); |
| 1109 | } |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1110 | tid_data->state = IWL_AGG_OFF; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1111 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); |
| 1112 | break; |
| 1113 | |
| 1114 | default: |
| 1115 | break; |
| 1116 | } |
| 1117 | } |
| 1118 | |
| 1119 | #ifdef CONFIG_IWLWIFI_DEBUG |
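/* Map a Tx failure/postpone status to a human-readable string for logging */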
| 1120 | const char *iwl_mvm_get_tx_fail_reason(u32 status) |
| 1121 | { |
| 1122 | #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x |
| 1123 | #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x |
| 1124 | |
| 1125 | switch (status & TX_STATUS_MSK) { |
| 1126 | case TX_STATUS_SUCCESS: |
| 1127 | return "SUCCESS"; |
| 1128 | TX_STATUS_POSTPONE(DELAY); |
| 1129 | TX_STATUS_POSTPONE(FEW_BYTES); |
| 1130 | TX_STATUS_POSTPONE(BT_PRIO); |
| 1131 | TX_STATUS_POSTPONE(QUIET_PERIOD); |
| 1132 | TX_STATUS_POSTPONE(CALC_TTAK); |
| 1133 | TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); |
| 1134 | TX_STATUS_FAIL(SHORT_LIMIT); |
| 1135 | TX_STATUS_FAIL(LONG_LIMIT); |
| 1136 | TX_STATUS_FAIL(UNDERRUN); |
| 1137 | TX_STATUS_FAIL(DRAIN_FLOW); |
| 1138 | TX_STATUS_FAIL(RFKILL_FLUSH); |
| 1139 | TX_STATUS_FAIL(LIFE_EXPIRE); |
| 1140 | TX_STATUS_FAIL(DEST_PS); |
| 1141 | TX_STATUS_FAIL(HOST_ABORTED); |
| 1142 | TX_STATUS_FAIL(BT_RETRY); |
| 1143 | TX_STATUS_FAIL(STA_INVALID); |
| 1144 | TX_STATUS_FAIL(FRAG_DROPPED); |
| 1145 | TX_STATUS_FAIL(TID_DISABLE); |
| 1146 | TX_STATUS_FAIL(FIFO_FLUSHED); |
| 1147 | TX_STATUS_FAIL(SMALL_CF_POLL); |
| 1148 | TX_STATUS_FAIL(FW_DROP); |
| 1149 | TX_STATUS_FAIL(STA_COLOR_MISMATCH); |
| 1150 | } |
| 1151 | |
| 1152 | return "UNKNOWN"; |
| 1153 | |
| 1154 | #undef TX_STATUS_FAIL |
| 1155 | #undef TX_STATUS_POSTPONE |
| 1156 | } |
| 1157 | #endif /* CONFIG_IWLWIFI_DEBUG */ |
| 1158 | |
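/*
 * Translate a firmware rate_n_flags value into a mac80211 ieee80211_tx_rate:
 * channel width, guard interval, greenfield and HT/VHT MCS flags, or the
 * legacy rate index for the given band.
 */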
Eyal Shapira | d310e40 | 2013-08-11 18:43:47 +0300 | [diff] [blame] | 1159 | void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, |
Johannes Berg | 57fbcce | 2016-04-12 15:56:15 +0200 | [diff] [blame] | 1160 | enum nl80211_band band, |
Eyal Shapira | d310e40 | 2013-08-11 18:43:47 +0300 | [diff] [blame] | 1161 | struct ieee80211_tx_rate *r) |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1162 | { |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1163 | if (rate_n_flags & RATE_HT_MCS_GF_MSK) |
| 1164 | r->flags |= IEEE80211_TX_RC_GREEN_FIELD; |
| 1165 | switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { |
| 1166 | case RATE_MCS_CHAN_WIDTH_20: |
| 1167 | break; |
| 1168 | case RATE_MCS_CHAN_WIDTH_40: |
| 1169 | r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; |
| 1170 | break; |
| 1171 | case RATE_MCS_CHAN_WIDTH_80: |
| 1172 | r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; |
| 1173 | break; |
| 1174 | case RATE_MCS_CHAN_WIDTH_160: |
| 1175 | r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; |
| 1176 | break; |
| 1177 | } |
| 1178 | if (rate_n_flags & RATE_MCS_SGI_MSK) |
| 1179 | r->flags |= IEEE80211_TX_RC_SHORT_GI; |
| 1180 | if (rate_n_flags & RATE_MCS_HT_MSK) { |
| 1181 | r->flags |= IEEE80211_TX_RC_MCS; |
| 1182 | r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; |
| 1183 | } else if (rate_n_flags & RATE_MCS_VHT_MSK) { |
| 1184 | ieee80211_rate_set_vht( |
| 1185 | r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, |
| 1186 | ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> |
| 1187 | RATE_VHT_MCS_NSS_POS) + 1); |
| 1188 | r->flags |= IEEE80211_TX_RC_VHT_MCS; |
| 1189 | } else { |
| 1190 | r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, |
Eyal Shapira | d310e40 | 2013-08-11 18:43:47 +0300 | [diff] [blame] | 1191 | band); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1192 | } |
| 1193 | } |
| 1194 | |
Eyal Shapira | d310e40 | 2013-08-11 18:43:47 +0300 | [diff] [blame] | 1195 | /*
 | 1196 |  * Translate the ucode rate response into mac80211 Tx status control values.
 | 1197 |  */
| 1198 | static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, |
| 1199 | struct ieee80211_tx_info *info) |
| 1200 | { |
| 1201 | struct ieee80211_tx_rate *r = &info->status.rates[0]; |
| 1202 | |
| 1203 | info->status.antenna = |
| 1204 | ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); |
| 1205 | iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); |
| 1206 | } |
| 1207 | |
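/*
 * If a FW_DBG_TRIGGER_TX_STATUS trigger is configured and one of its statuses
 * matches this Tx status, collect firmware debug data for it.
 */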
Golan Ben-Ami | 25657fe | 2015-09-02 12:34:23 +0300 | [diff] [blame] | 1208 | static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, |
| 1209 | u32 status) |
| 1210 | { |
| 1211 | struct iwl_fw_dbg_trigger_tlv *trig; |
| 1212 | struct iwl_fw_dbg_trigger_tx_status *status_trig; |
| 1213 | int i; |
| 1214 | |
| 1215 | if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) |
| 1216 | return; |
| 1217 | |
| 1218 | trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); |
| 1219 | status_trig = (void *)trig->data; |
| 1220 | |
| 1221 | if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) |
| 1222 | return; |
| 1223 | |
| 1224 | for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { |
| 1225 | /* don't collect on status 0 */ |
| 1226 | if (!status_trig->statuses[i].status) |
| 1227 | break; |
| 1228 | |
| 1229 | if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) |
| 1230 | continue; |
| 1231 | |
| 1232 | iwl_mvm_fw_dbg_collect_trig(mvm, trig, |
| 1233 | "Tx status %d was received", |
| 1234 | status & TX_STATUS_MSK); |
| 1235 | break; |
| 1236 | } |
| 1237 | } |
| 1238 | |
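/*
 * Handle a Tx response for a single (non-aggregated) frame: reclaim the
 * frame(s) up to the SCD SSN, fill in the mac80211 Tx status (ACK flag, rate,
 * airtime), update next_reclaimed and the power-save/EOSP bookkeeping for the
 * station, and decrement the pending-frames counter for non-aggregation
 * queues.
 */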
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1239 | static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, |
| 1240 | struct iwl_rx_packet *pkt) |
| 1241 | { |
| 1242 | struct ieee80211_sta *sta; |
| 1243 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
| 1244 | int txq_id = SEQ_TO_QUEUE(sequence); |
| 1245 | struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; |
| 1246 | int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); |
| 1247 | int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); |
| 1248 | u32 status = le16_to_cpu(tx_resp->status.status); |
| 1249 | u16 ssn = iwl_mvm_get_scd_ssn(tx_resp); |
| 1250 | struct iwl_mvm_sta *mvmsta; |
| 1251 | struct sk_buff_head skbs; |
| 1252 | u8 skb_freed = 0; |
| 1253 | u16 next_reclaimed, seq_ctl; |
Emmanuel Grumbach | 532beba | 2016-03-07 22:23:52 +0200 | [diff] [blame] | 1254 | bool is_ndp = false; |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 1255 | bool txq_agg = false; /* Is this TXQ aggregated */ |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1256 | |
| 1257 | __skb_queue_head_init(&skbs); |
| 1258 | |
| 1259 | seq_ctl = le16_to_cpu(tx_resp->seq_ctl); |
| 1260 | |
| 1261 | /* we can free until ssn % q.n_bd not inclusive */ |
| 1262 | iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); |
| 1263 | |
| 1264 | while (!skb_queue_empty(&skbs)) { |
| 1265 | struct sk_buff *skb = __skb_dequeue(&skbs); |
| 1266 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 1267 | |
| 1268 | skb_freed++; |
| 1269 | |
| 1270 | iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); |
| 1271 | |
| 1272 | memset(&info->status, 0, sizeof(info->status)); |
| 1273 | |
| 1274 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; |
| 1275 | |
| 1276 | /* inform mac80211 about what happened with the frame */ |
| 1277 | switch (status & TX_STATUS_MSK) { |
| 1278 | case TX_STATUS_SUCCESS: |
| 1279 | case TX_STATUS_DIRECT_DONE: |
| 1280 | info->flags |= IEEE80211_TX_STAT_ACK; |
| 1281 | break; |
| 1282 | case TX_STATUS_FAIL_DEST_PS: |
| 1283 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; |
| 1284 | break; |
| 1285 | default: |
| 1286 | break; |
| 1287 | } |
| 1288 | |
Golan Ben-Ami | 25657fe | 2015-09-02 12:34:23 +0300 | [diff] [blame] | 1289 | iwl_mvm_tx_status_check_trigger(mvm, status); |
| 1290 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1291 | info->status.rates[0].count = tx_resp->failure_frame + 1; |
Eyal Shapira | d310e40 | 2013-08-11 18:43:47 +0300 | [diff] [blame] | 1292 | iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), |
| 1293 | info); |
Eyal Shapira | 929e6ed | 2015-01-30 13:40:02 +0200 | [diff] [blame] | 1294 | info->status.status_driver_data[1] = |
| 1295 | (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1296 | |
| 1297 | /* Single frame failure in an AMPDU queue => send BAR */ |
Eytan Lifshitz | 19e737c | 2013-09-09 13:30:15 +0200 | [diff] [blame] | 1298 | if (txq_id >= mvm->first_agg_queue && |
Eyal Shapira | 9ce578a | 2014-12-31 15:22:38 +0200 | [diff] [blame] | 1299 | !(info->flags & IEEE80211_TX_STAT_ACK) && |
| 1300 | !(info->flags & IEEE80211_TX_STAT_TX_FILTERED)) |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1301 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1302 | |
Emmanuel Grumbach | ebea2f3 | 2013-06-13 10:07:47 +0300 | [diff] [blame] | 1303 | /* W/A FW bug: seq_ctl is wrong when the status isn't success */ |
| 1304 | if (status != TX_STATUS_SUCCESS) { |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1305 | struct ieee80211_hdr *hdr = (void *)skb->data; |
| 1306 | seq_ctl = le16_to_cpu(hdr->seq_ctrl); |
| 1307 | } |
| 1308 | |
Emmanuel Grumbach | 532beba | 2016-03-07 22:23:52 +0200 | [diff] [blame] | 1309 | if (unlikely(!seq_ctl)) { |
| 1310 | struct ieee80211_hdr *hdr = (void *)skb->data; |
| 1311 | |
 | 1312 | 			/*
 | 1313 | 			 * If it is an NDP, we can't update next_reclaimed since
 | 1314 | 			 * its sequence control is 0. Note that for that same
 | 1315 | 			 * reason, NDPs are never sent to A-MPDU'able queues,
 | 1316 | 			 * so we can never have more than one freed frame
 | 1317 | 			 * for a single Tx response (see WARN_ON below).
 | 1318 | 			 */
| 1319 | if (ieee80211_is_qos_nullfunc(hdr->frame_control)) |
| 1320 | is_ndp = true; |
| 1321 | } |
| 1322 | |
Emmanuel Grumbach | 9b5452f | 2014-10-07 10:38:53 +0300 | [diff] [blame] | 1323 | /* |
| 1324 | * TODO: this is not accurate if we are freeing more than one |
| 1325 | * packet. |
| 1326 | */ |
| 1327 | info->status.tx_time = |
| 1328 | le16_to_cpu(tx_resp->wireless_media_time); |
Eliad Peller | 3a84b69 | 2014-03-12 15:05:06 +0200 | [diff] [blame] | 1329 | BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); |
| 1330 | info->status.status_driver_data[0] = |
| 1331 | (void *)(uintptr_t)tx_resp->reduced_tpc; |
| 1332 | |
Johannes Berg | f14d6b3 | 2014-03-21 13:30:03 +0100 | [diff] [blame] | 1333 | ieee80211_tx_status(mvm->hw, skb); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1334 | } |
| 1335 | |
Eytan Lifshitz | 19e737c | 2013-09-09 13:30:15 +0200 | [diff] [blame] | 1336 | if (txq_id >= mvm->first_agg_queue) { |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1337 | 		/* If this is an aggregation queue, we use the ssn since:
 | 1338 | 		 * ssn = wifi seq_num % 256.
 | 1339 | 		 * The seq_ctl is the sequence control of the packet to which
 | 1340 | 		 * this Tx response relates. But if there is a hole in the
 | 1341 | 		 * bitmap of the BA we received, this Tx response may allow us
 | 1342 | 		 * to reclaim the hole and all the subsequent packets that were
 | 1343 | 		 * already acked. In that case, seq_ctl != ssn, and the next
 | 1344 | 		 * packet to be reclaimed will be ssn and not seq_ctl. In that
 | 1345 | 		 * case, several packets will be reclaimed even if
 | 1346 | 		 * frame_count = 1.
 | 1347 | 		 *
 | 1348 | 		 * The ssn is the index (% 256) of the latest packet that has
 | 1349 | 		 * been treated (acked / dropped) + 1.
 | 1350 | 		 */
| 1351 | next_reclaimed = ssn; |
| 1352 | } else { |
| 1353 | /* The next packet to be reclaimed is the one after this one */ |
Johannes Berg | 9a88658 | 2013-02-15 19:25:00 +0100 | [diff] [blame] | 1354 | next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1355 | } |
| 1356 | |
| 1357 | IWL_DEBUG_TX_REPLY(mvm, |
Emmanuel Grumbach | 8c6e83d | 2013-03-20 17:12:46 +0200 | [diff] [blame] | 1358 | "TXQ %d status %s (0x%08x)\n", |
| 1359 | txq_id, iwl_mvm_get_tx_fail_reason(status), status); |
| 1360 | |
| 1361 | IWL_DEBUG_TX_REPLY(mvm, |
| 1362 | "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n", |
| 1363 | le32_to_cpu(tx_resp->initial_rate), |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1364 | tx_resp->failure_frame, SEQ_TO_INDEX(sequence), |
| 1365 | ssn, next_reclaimed, seq_ctl); |
| 1366 | |
| 1367 | rcu_read_lock(); |
| 1368 | |
| 1369 | sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); |
Emmanuel Grumbach | 9bb0c1a | 2014-01-20 15:21:26 +0200 | [diff] [blame] | 1370 | 	/*
 | 1371 | 	 * sta can't be NULL; otherwise it'd mean that the sta has been freed
 | 1372 | 	 * in the firmware while we still have packets for it in the Tx queues.
 | 1373 | 	 */
| 1374 | if (WARN_ON_ONCE(!sta)) |
| 1375 | goto out; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1376 | |
Emmanuel Grumbach | 9bb0c1a | 2014-01-20 15:21:26 +0200 | [diff] [blame] | 1377 | if (!IS_ERR(sta)) { |
Johannes Berg | 5b577a9 | 2013-11-14 18:20:04 +0100 | [diff] [blame] | 1378 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1379 | |
| 1380 | if (tid != IWL_TID_NON_QOS) { |
| 1381 | struct iwl_mvm_tid_data *tid_data = |
| 1382 | &mvmsta->tid_data[tid]; |
Emmanuel Grumbach | 36be0eb | 2015-11-05 10:32:31 +0200 | [diff] [blame] | 1383 | bool send_eosp_ndp = false; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1384 | |
Johannes Berg | 2bfb509 | 2012-12-27 21:43:48 +0100 | [diff] [blame] | 1385 | spin_lock_bh(&mvmsta->lock); |
Oren Givon | 2c4a247 | 2016-05-29 14:05:50 +0300 | [diff] [blame] | 1386 | if (iwl_mvm_is_dqa_supported(mvm)) { |
| 1387 | enum iwl_mvm_agg_state state; |
| 1388 | |
| 1389 | state = mvmsta->tid_data[tid].state; |
| 1390 | txq_agg = (state == IWL_AGG_ON || |
| 1391 | state == IWL_EMPTYING_HW_QUEUE_DELBA); |
| 1392 | } else { |
| 1393 | txq_agg = txq_id >= mvm->first_agg_queue; |
| 1394 | } |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 1395 | |
Emmanuel Grumbach | 532beba | 2016-03-07 22:23:52 +0200 | [diff] [blame] | 1396 | if (!is_ndp) { |
| 1397 | tid_data->next_reclaimed = next_reclaimed; |
| 1398 | IWL_DEBUG_TX_REPLY(mvm, |
| 1399 | "Next reclaimed packet:%d\n", |
| 1400 | next_reclaimed); |
| 1401 | } else { |
| 1402 | IWL_DEBUG_TX_REPLY(mvm, |
| 1403 | "NDP - don't update next_reclaimed\n"); |
| 1404 | } |
| 1405 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1406 | iwl_mvm_check_ratid_empty(mvm, sta, tid); |
Emmanuel Grumbach | 36be0eb | 2015-11-05 10:32:31 +0200 | [diff] [blame] | 1407 | |
| 1408 | if (mvmsta->sleep_tx_count) { |
| 1409 | mvmsta->sleep_tx_count--; |
| 1410 | if (mvmsta->sleep_tx_count && |
| 1411 | !iwl_mvm_tid_queued(tid_data)) { |
 | 1412 | 				/*
 | 1413 | 				 * The number of frames in the queue
 | 1414 | 				 * dropped to 0 even though we sent
 | 1415 | 				 * fewer frames than we thought were
 | 1416 | 				 * on the Tx queue.
 | 1417 | 				 * This means we had holes in the BA
 | 1418 | 				 * window that we just filled; ask
 | 1419 | 				 * mac80211 to send EOSP since the
 | 1420 | 				 * firmware won't know how to do that.
 | 1421 | 				 * Send an NDP and the firmware will
 | 1422 | 				 * send an EOSP notification that will
 | 1423 | 				 * trigger a call to ieee80211_sta_eosp().
 | 1424 | 				 */
| 1425 | send_eosp_ndp = true; |
| 1426 | } |
| 1427 | } |
| 1428 | |
Johannes Berg | 2bfb509 | 2012-12-27 21:43:48 +0100 | [diff] [blame] | 1429 | spin_unlock_bh(&mvmsta->lock); |
Emmanuel Grumbach | 36be0eb | 2015-11-05 10:32:31 +0200 | [diff] [blame] | 1430 | if (send_eosp_ndp) { |
| 1431 | iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, |
| 1432 | IEEE80211_FRAME_RELEASE_UAPSD, |
| 1433 | 1, tid, false, false); |
| 1434 | mvmsta->sleep_tx_count = 0; |
| 1435 | ieee80211_send_eosp_nullfunc(sta, tid); |
| 1436 | } |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1437 | } |
Johannes Berg | 3e56ead | 2013-02-15 22:23:18 +0100 | [diff] [blame] | 1438 | |
| 1439 | if (mvmsta->next_status_eosp) { |
| 1440 | mvmsta->next_status_eosp = false; |
| 1441 | ieee80211_sta_eosp(sta); |
| 1442 | } |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1443 | } else { |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1444 | mvmsta = NULL; |
| 1445 | } |
| 1446 | |
 | 1447 | 	/*
 | 1448 | 	 * If the txq is not an AMPDU queue, there is no chance we freed
 | 1449 | 	 * several skbs; check that below.
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1450 | */ |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 1451 | if (txq_agg) |
Emmanuel Grumbach | 9bb0c1a | 2014-01-20 15:21:26 +0200 | [diff] [blame] | 1452 | goto out; |
| 1453 | |
| 1454 | /* We can't free more than one frame at once on a shared queue */ |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 1455 | WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); |
Emmanuel Grumbach | 9bb0c1a | 2014-01-20 15:21:26 +0200 | [diff] [blame] | 1456 | |
Emmanuel Grumbach | 589a6ba | 2014-06-05 11:32:41 +0300 | [diff] [blame] | 1457 | 	/* If we still have frames for this STA, there is nothing to do here */
Emmanuel Grumbach | 9bb0c1a | 2014-01-20 15:21:26 +0200 | [diff] [blame] | 1458 | if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) |
| 1459 | goto out; |
| 1460 | |
| 1461 | if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) { |
Andrei Otcheretianski | 003e5236 | 2014-05-25 17:24:22 +0300 | [diff] [blame] | 1462 | |
Emmanuel Grumbach | 9bb0c1a | 2014-01-20 15:21:26 +0200 | [diff] [blame] | 1463 | /* |
Andrei Otcheretianski | 003e5236 | 2014-05-25 17:24:22 +0300 | [diff] [blame] | 1464 | * If there are no pending frames for this STA and |
| 1465 | * the tx to this station is not disabled, notify |
| 1466 | * mac80211 that this station can now wake up in its |
Emmanuel Grumbach | 9bb0c1a | 2014-01-20 15:21:26 +0200 | [diff] [blame] | 1467 | * STA table. |
| 1468 | * If mvmsta is not NULL, sta is valid. |
| 1469 | */ |
Andrei Otcheretianski | 003e5236 | 2014-05-25 17:24:22 +0300 | [diff] [blame] | 1470 | |
| 1471 | spin_lock_bh(&mvmsta->lock); |
| 1472 | |
| 1473 | if (!mvmsta->disable_tx) |
| 1474 | ieee80211_sta_block_awake(mvm->hw, sta, false); |
| 1475 | |
| 1476 | spin_unlock_bh(&mvmsta->lock); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1477 | } |
| 1478 | |
Emmanuel Grumbach | 9bb0c1a | 2014-01-20 15:21:26 +0200 | [diff] [blame] | 1479 | if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) { |
| 1480 | /* |
| 1481 | * We are draining and this was the last packet - pre_rcu_remove |
| 1482 | * has been called already. We might be after the |
| 1483 | * synchronize_net already. |
| 1484 | * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues. |
| 1485 | */ |
| 1486 | set_bit(sta_id, mvm->sta_drained); |
| 1487 | schedule_work(&mvm->sta_drained_wk); |
| 1488 | } |
| 1489 | |
| 1490 | out: |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1491 | rcu_read_unlock(); |
| 1492 | } |
| 1493 | |
| 1494 | #ifdef CONFIG_IWLWIFI_DEBUG |
| 1495 | #define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x |
| 1496 | static const char *iwl_get_agg_tx_status(u16 status) |
| 1497 | { |
| 1498 | switch (status & AGG_TX_STATE_STATUS_MSK) { |
| 1499 | AGG_TX_STATE_(TRANSMITTED); |
| 1500 | AGG_TX_STATE_(UNDERRUN); |
| 1501 | AGG_TX_STATE_(BT_PRIO); |
| 1502 | AGG_TX_STATE_(FEW_BYTES); |
| 1503 | AGG_TX_STATE_(ABORT); |
| 1504 | AGG_TX_STATE_(LAST_SENT_TTL); |
| 1505 | AGG_TX_STATE_(LAST_SENT_TRY_CNT); |
| 1506 | AGG_TX_STATE_(LAST_SENT_BT_KILL); |
| 1507 | AGG_TX_STATE_(SCD_QUERY); |
| 1508 | AGG_TX_STATE_(TEST_BAD_CRC32); |
| 1509 | AGG_TX_STATE_(RESPONSE); |
| 1510 | AGG_TX_STATE_(DUMP_TX); |
| 1511 | AGG_TX_STATE_(DELAY_TX); |
| 1512 | } |
| 1513 | |
| 1514 | return "UNKNOWN"; |
| 1515 | } |
| 1516 | |
| 1517 | static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, |
| 1518 | struct iwl_rx_packet *pkt) |
| 1519 | { |
| 1520 | struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; |
| 1521 | struct agg_tx_status *frame_status = &tx_resp->status; |
| 1522 | int i; |
| 1523 | |
| 1524 | for (i = 0; i < tx_resp->frame_count; i++) { |
| 1525 | u16 fstatus = le16_to_cpu(frame_status[i].status); |
| 1526 | |
| 1527 | IWL_DEBUG_TX_REPLY(mvm, |
| 1528 | "status %s (0x%04x), try-count (%d) seq (0x%x)\n", |
| 1529 | iwl_get_agg_tx_status(fstatus), |
| 1530 | fstatus & AGG_TX_STATE_STATUS_MSK, |
| 1531 | (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >> |
| 1532 | AGG_TX_STATE_TRY_CNT_POS, |
| 1533 | le16_to_cpu(frame_status[i].sequence)); |
| 1534 | } |
| 1535 | } |
| 1536 | #else |
| 1537 | static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, |
| 1538 | struct iwl_rx_packet *pkt) |
| 1539 | {} |
| 1540 | #endif /* CONFIG_IWLWIFI_DEBUG */ |
| 1541 | |
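/*
 * Handle a Tx response for an aggregated frame. The actual reclaim happens on
 * the BA notification; here we only cache the rate and airtime for this
 * RA/TID so they can be used when the BA notification arrives.
 */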
| 1542 | static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, |
| 1543 | struct iwl_rx_packet *pkt) |
| 1544 | { |
| 1545 | struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; |
| 1546 | int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); |
| 1547 | int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); |
| 1548 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
Sara Sharon | 13303c0 | 2016-04-10 15:51:54 +0300 | [diff] [blame] | 1549 | struct iwl_mvm_sta *mvmsta; |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 1550 | int queue = SEQ_TO_QUEUE(sequence); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1551 | |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 1552 | if (WARN_ON_ONCE(queue < mvm->first_agg_queue && |
| 1553 | (!iwl_mvm_is_dqa_supported(mvm) || |
| 1554 | (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))) |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1555 | return; |
| 1556 | |
| 1557 | if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) |
| 1558 | return; |
| 1559 | |
| 1560 | iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); |
| 1561 | |
| 1562 | rcu_read_lock(); |
| 1563 | |
Sara Sharon | 13303c0 | 2016-04-10 15:51:54 +0300 | [diff] [blame] | 1564 | mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1565 | |
Sara Sharon | 13303c0 | 2016-04-10 15:51:54 +0300 | [diff] [blame] | 1566 | if (!WARN_ON_ONCE(!mvmsta)) { |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1567 | mvmsta->tid_data[tid].rate_n_flags = |
| 1568 | le32_to_cpu(tx_resp->initial_rate); |
Emmanuel Grumbach | 9b5452f | 2014-10-07 10:38:53 +0300 | [diff] [blame] | 1569 | mvmsta->tid_data[tid].tx_time = |
| 1570 | le16_to_cpu(tx_resp->wireless_media_time); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1571 | } |
| 1572 | |
| 1573 | rcu_read_unlock(); |
| 1574 | } |
| 1575 | |
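/*
 * Rx handler for Tx responses: a response for a single frame is handled by
 * iwl_mvm_rx_tx_cmd_single(), anything else by iwl_mvm_rx_tx_cmd_agg().
 */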
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1576 | void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1577 | { |
| 1578 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 1579 | struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; |
| 1580 | |
| 1581 | if (tx_resp->frame_count == 1) |
| 1582 | iwl_mvm_rx_tx_cmd_single(mvm, pkt); |
| 1583 | else |
| 1584 | iwl_mvm_rx_tx_cmd_agg(mvm, pkt); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1585 | } |
| 1586 | |
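/*
 * Reclaim the frames on the given Tx queue up to 'index' for this sta_id/tid
 * after a BA notification: free their Tx commands, mark them acked, attach
 * the rate-scaling data (from ba_info and the reported rate) to the first
 * reclaimed skb and hand them back to mac80211. If nothing could be
 * reclaimed, the BA info is still reported to the rate-scaling algorithm.
 */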
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1587 | static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, |
| 1588 | int txq, int index, |
| 1589 | struct ieee80211_tx_info *ba_info, u32 rate) |
Eyal Shapira | a713044 | 2014-09-14 15:28:09 +0300 | [diff] [blame] | 1590 | { |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1591 | struct sk_buff_head reclaimed_skbs; |
| 1592 | struct iwl_mvm_tid_data *tid_data; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1593 | struct ieee80211_sta *sta; |
| 1594 | struct iwl_mvm_sta *mvmsta; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1595 | struct sk_buff *skb; |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1596 | int freed; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1597 | |
Eyal Shapira | 2cee476 | 2015-01-16 11:09:30 +0200 | [diff] [blame] | 1598 | if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || |
| 1599 | tid >= IWL_MAX_TID_COUNT, |
| 1600 | "sta_id %d tid %d", sta_id, tid)) |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1601 | return; |
Eyal Shapira | 2cee476 | 2015-01-16 11:09:30 +0200 | [diff] [blame] | 1602 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1603 | rcu_read_lock(); |
| 1604 | |
| 1605 | sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); |
| 1606 | |
| 1607 | /* Reclaiming frames for a station that has been deleted ? */ |
| 1608 | if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { |
| 1609 | rcu_read_unlock(); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1610 | return; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1611 | } |
| 1612 | |
Johannes Berg | 5b577a9 | 2013-11-14 18:20:04 +0100 | [diff] [blame] | 1613 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1614 | tid_data = &mvmsta->tid_data[tid]; |
| 1615 | |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1616 | if (tid_data->txq_id != txq) { |
Johannes Berg | 1f16ea2 | 2015-03-06 09:17:37 +0100 | [diff] [blame] | 1617 | IWL_ERR(mvm, |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1618 | "invalid BA notification: Q %d, tid %d\n", |
| 1619 | tid_data->txq_id, tid); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1620 | rcu_read_unlock(); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1621 | return; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1622 | } |
| 1623 | |
Johannes Berg | 2bfb509 | 2012-12-27 21:43:48 +0100 | [diff] [blame] | 1624 | spin_lock_bh(&mvmsta->lock); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1625 | |
| 1626 | __skb_queue_head_init(&reclaimed_skbs); |
| 1627 | |
| 1628 | /* |
| 1629 | * Release all TFDs before the SSN, i.e. all TFDs in front of |
| 1630 | * block-ack window (we assume that they've been successfully |
| 1631 | * transmitted ... if not, it's too late anyway). |
| 1632 | */ |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1633 | iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1634 | |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1635 | tid_data->next_reclaimed = index; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1636 | |
| 1637 | iwl_mvm_check_ratid_empty(mvm, sta, tid); |
| 1638 | |
| 1639 | freed = 0; |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1640 | ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1641 | |
| 1642 | skb_queue_walk(&reclaimed_skbs, skb) { |
Johannes Berg | 143582c | 2014-02-25 10:37:15 +0100 | [diff] [blame] | 1643 | struct ieee80211_hdr *hdr = (void *)skb->data; |
| 1644 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1645 | |
| 1646 | if (ieee80211_is_data_qos(hdr->frame_control)) |
| 1647 | freed++; |
| 1648 | else |
| 1649 | WARN_ON_ONCE(1); |
| 1650 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1651 | iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); |
| 1652 | |
Johannes Berg | 143582c | 2014-02-25 10:37:15 +0100 | [diff] [blame] | 1653 | memset(&info->status, 0, sizeof(info->status)); |
 | 1654 | 		/* Packet was transmitted successfully; failures come as single
 | 1655 | 		 * frames because, before failing a frame, the firmware transmits
 | 1656 | 		 * it without aggregation at least once.
| 1657 | */ |
| 1658 | info->flags |= IEEE80211_TX_STAT_ACK; |
| 1659 | |
Eyal Shapira | a713044 | 2014-09-14 15:28:09 +0300 | [diff] [blame] | 1660 | /* this is the first skb we deliver in this batch */ |
| 1661 | /* put the rate scaling data there */ |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1662 | if (freed == 1) { |
| 1663 | info->flags |= IEEE80211_TX_STAT_AMPDU; |
| 1664 | memcpy(&info->status, &ba_info->status, |
| 1665 | sizeof(ba_info->status)); |
| 1666 | iwl_mvm_hwrate_to_tx_status(rate, info); |
| 1667 | } |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1668 | } |
| 1669 | |
Johannes Berg | 2bfb509 | 2012-12-27 21:43:48 +0100 | [diff] [blame] | 1670 | spin_unlock_bh(&mvmsta->lock); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1671 | |
Eyal Shapira | a713044 | 2014-09-14 15:28:09 +0300 | [diff] [blame] | 1672 | 	/* We got a BA notif with 0 acked, or scd_ssn didn't progress, which is
 | 1673 | 	 * possible (i.e. the first MPDU in the aggregation wasn't acked).
 | 1674 | 	 * Still, it's important to update RS about sent vs. acked.
 | 1675 | 	 */
| 1676 | if (skb_queue_empty(&reclaimed_skbs)) { |
Eyal Shapira | a713044 | 2014-09-14 15:28:09 +0300 | [diff] [blame] | 1677 | struct ieee80211_chanctx_conf *chanctx_conf = NULL; |
| 1678 | |
| 1679 | if (mvmsta->vif) |
| 1680 | chanctx_conf = |
| 1681 | rcu_dereference(mvmsta->vif->chanctx_conf); |
| 1682 | |
| 1683 | if (WARN_ON_ONCE(!chanctx_conf)) |
| 1684 | goto out; |
| 1685 | |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1686 | ba_info->band = chanctx_conf->def.chan->band; |
| 1687 | iwl_mvm_hwrate_to_tx_status(rate, ba_info); |
Eyal Shapira | a713044 | 2014-09-14 15:28:09 +0300 | [diff] [blame] | 1688 | |
| 1689 | IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n"); |
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1690 | iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false); |
Eyal Shapira | a713044 | 2014-09-14 15:28:09 +0300 | [diff] [blame] | 1691 | } |
| 1692 | |
| 1693 | out: |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1694 | rcu_read_unlock(); |
| 1695 | |
| 1696 | while (!skb_queue_empty(&reclaimed_skbs)) { |
| 1697 | skb = __skb_dequeue(&reclaimed_skbs); |
Johannes Berg | f14d6b3 | 2014-03-21 13:30:03 +0100 | [diff] [blame] | 1698 | ieee80211_tx_status(mvm->hw, skb); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1699 | } |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1700 | } |
| 1701 | |
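/*
 * Handle a BA notification from the firmware. With the new Tx API the
 * firmware sends a compressed BA (struct iwl_mvm_compressed_ba_notif) that
 * carries the Tx queue and index directly; otherwise the legacy
 * struct iwl_mvm_ba_notif is parsed and the rate and airtime cached per TID
 * are used. Both paths end up reclaiming frames via iwl_mvm_tx_reclaim().
 */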
Sara Sharon | c46e772 | 2016-07-17 14:24:55 +0300 | [diff] [blame^] | 1702 | void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) |
| 1703 | { |
| 1704 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 1705 | int sta_id, tid, txq, index; |
| 1706 | struct ieee80211_tx_info ba_info = {}; |
| 1707 | struct iwl_mvm_ba_notif *ba_notif; |
| 1708 | struct iwl_mvm_tid_data *tid_data; |
| 1709 | struct iwl_mvm_sta *mvmsta; |
| 1710 | |
| 1711 | if (iwl_mvm_has_new_tx_api(mvm)) { |
| 1712 | struct iwl_mvm_compressed_ba_notif *ba_res = |
| 1713 | (void *)pkt->data; |
| 1714 | |
| 1715 | sta_id = ba_res->sta_id; |
| 1716 | ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done); |
| 1717 | ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed); |
| 1718 | ba_info.status.tx_time = |
| 1719 | (u16)le32_to_cpu(ba_res->wireless_time); |
| 1720 | ba_info.status.status_driver_data[0] = |
| 1721 | (void *)(uintptr_t)ba_res->reduced_txp; |
| 1722 | |
| 1723 | /* |
| 1724 | * TODO: |
| 1725 | * When supporting multi TID aggregations - we need to move |
| 1726 | * next_reclaimed to be per TXQ and not per TID or handle it |
| 1727 | * in a different way. |
| 1728 | * This will go together with SN and AddBA offload and cannot |
| 1729 | * be handled properly for now. |
| 1730 | */ |
| 1731 | WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1); |
| 1732 | iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid, |
| 1733 | (int)ba_res->tfd[0].q_num, |
| 1734 | le16_to_cpu(ba_res->tfd[0].tfd_index), |
| 1735 | &ba_info, le32_to_cpu(ba_res->tx_rate)); |
| 1736 | |
| 1737 | IWL_DEBUG_TX_REPLY(mvm, |
| 1738 | "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", |
| 1739 | sta_id, le32_to_cpu(ba_res->flags), |
| 1740 | le16_to_cpu(ba_res->txed), |
| 1741 | le16_to_cpu(ba_res->done)); |
| 1742 | return; |
| 1743 | } |
| 1744 | |
| 1745 | ba_notif = (void *)pkt->data; |
| 1746 | sta_id = ba_notif->sta_id; |
| 1747 | tid = ba_notif->tid; |
| 1748 | /* "flow" corresponds to Tx queue */ |
| 1749 | txq = le16_to_cpu(ba_notif->scd_flow); |
| 1750 | /* "ssn" is start of block-ack Tx window, corresponds to index |
| 1751 | * (in Tx queue's circular buffer) of first TFD/frame in window */ |
| 1752 | index = le16_to_cpu(ba_notif->scd_ssn); |
| 1753 | |
| 1754 | rcu_read_lock(); |
| 1755 | mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); |
| 1756 | if (WARN_ON_ONCE(!mvmsta)) { |
| 1757 | rcu_read_unlock(); |
| 1758 | return; |
| 1759 | } |
| 1760 | |
| 1761 | tid_data = &mvmsta->tid_data[tid]; |
| 1762 | |
| 1763 | ba_info.status.ampdu_ack_len = ba_notif->txed_2_done; |
| 1764 | ba_info.status.ampdu_len = ba_notif->txed; |
| 1765 | ba_info.status.tx_time = tid_data->tx_time; |
| 1766 | ba_info.status.status_driver_data[0] = |
| 1767 | (void *)(uintptr_t)ba_notif->reduced_txp; |
| 1768 | |
| 1769 | rcu_read_unlock(); |
| 1770 | |
| 1771 | iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, |
| 1772 | tid_data->rate_n_flags); |
| 1773 | |
| 1774 | IWL_DEBUG_TX_REPLY(mvm, |
| 1775 | "BA_NOTIFICATION Received from %pM, sta_id = %d\n", |
| 1776 | (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id); |
| 1777 | |
| 1778 | IWL_DEBUG_TX_REPLY(mvm, |
| 1779 | "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", |
| 1780 | ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), |
| 1781 | le64_to_cpu(ba_notif->bitmap), txq, index, |
| 1782 | ba_notif->txed, ba_notif->txed_2_done); |
| 1783 | |
| 1784 | IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", |
| 1785 | ba_notif->reduced_txp); |
| 1786 | } |
| 1787 | |
Emmanuel Grumbach | fe92e32 | 2015-03-11 09:34:31 +0200 | [diff] [blame] | 1788 | /* |
| 1789 | * Note that there are transports that buffer frames before they reach |
| 1790 | * the firmware. This means that after flush_tx_path is called, the |
| 1791 | * queue might not be empty. The race-free way to handle this is to: |
| 1792 | * 1) set the station as draining |
| 1793 | * 2) flush the Tx path |
| 1794 | * 3) wait for the transport queues to be empty |
| 1795 | */ |
Luca Coelho | 5888a40 | 2015-10-06 09:54:57 +0300 | [diff] [blame] | 1796 | int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags) |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1797 | { |
| 1798 | int ret; |
| 1799 | struct iwl_tx_path_flush_cmd flush_cmd = { |
| 1800 | .queues_ctl = cpu_to_le32(tfd_msk), |
| 1801 | .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), |
| 1802 | }; |
| 1803 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1804 | ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, |
| 1805 | sizeof(flush_cmd), &flush_cmd); |
| 1806 | if (ret) |
| 1807 | IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); |
| 1808 | return ret; |
| 1809 | } |