/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
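
/*
 * Illustrative sketch (editor's addition, not driver code): callers never
 * pick a struct version themselves - they build the full
 * iwl_mvm_add_sta_cmd and let the size returned above truncate the payload
 * for older firmware, e.g.:
 *
 *	u32 status = ADD_STA_SUCCESS;
 *	int ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *					      iwl_mvm_add_sta_cmd_size(mvm),
 *					      &cmd, &status);
 *
 * This is safe because the v7 layout is a strict prefix of the newer one.
 */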

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}
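
/*
 * Illustrative sketch (editor's addition): a caller such as iwl_mvm_add_sta()
 * is expected to treat the IWL_MVM_STATION_COUNT return value as "no free
 * slot", e.g.:
 *
 *	sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif));
 *	if (sta_id == IWL_MVM_STATION_COUNT)
 *		return -ENOSPC;
 *
 * The use of ieee80211_vif_type_p2p() is an assumption about the call site,
 * not something this function requires.
 */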

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
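
/*
 * Worked example (editor's addition): the session is torn down only after
 * twice the negotiated BA timeout with no RX. With a timeout of 5000 TUs
 * (1 TU = 1024 usec), the deadline computed above is:
 *
 *	last_rx + TU_TO_JIFFIES(2 * 5000)	(~10.24 seconds of idle time)
 *
 * If a frame arrived more recently than that, the handler re-arms the timer
 * for the remaining interval instead of stopping the session.
 */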

static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}
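
/*
 * Worked example (editor's addition, illustrative numbers): with
 * first_agg_queue == 10 and queues {0, 1, 4, 9} already in use
 * (used_hw_queues == 0x213), the allocation loop above hands out the first
 * free bits in order, so the four ACs get queues 2, 3, 5 and 6; each
 * __set_bit() marks the chosen queue before the next lookup.
 */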

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
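
/*
 * Illustrative sketch (editor's addition): call sites later in this file use
 * the helper above in two complementary ways -
 *
 *	// stop aggregation on TIDs 1 and 4, keep the queue mapped
 *	iwl_mvm_invalidate_sta_queue(mvm, queue, BIT(1) | BIT(4), false);
 *
 *	// tell the FW the queue no longer belongs to this STA at all
 *	iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
 *
 * The TID numbers are made up for the example; any subset works.
 */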

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	spin_lock_bh(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock_bh(&mvm->queue_info_lock);

	return disable_agg_tids;
}
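
/*
 * Worked example (editor's addition): if TIDs 1 and 4 on the queue were in
 * IWL_AGG_ON, the function above returns BIT(1) | BIT(4) == 0x12, which the
 * caller can feed straight into iwl_mvm_invalidate_sta_queue() so the FW
 * stops aggregation on exactly those TIDs (this is what
 * iwl_mvm_sta_alloc_queue() below does with the returned bitmap).
 */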

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}
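
/*
 * Worked example (editor's addition): suppose a STA already owns DATA queues
 * for BK and VI only, and a frame arrives for AC_BE. Priority 1 fails (no BE
 * queue exists), priority 2 fails for the same reason, 3a doesn't apply (the
 * new AC isn't VO), so priority 3b picks the BK queue - the highest AC that
 * is still lower-priority than the new one.
 */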

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}
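
/*
 * Concrete case for the "flipped" AC check above (editor's addition): enum
 * ieee80211_ac_numbers orders the ACs VO = 0, VI = 1, BE = 2, BK = 3, i.e.
 * the lower the priority, the higher the number. A queue currently marked
 * BE (2) that picks up a new BK (3) stream satisfies ac > mac80211_ac and
 * gets redirected to the BK FIFO, while a new VO (0) stream on that same
 * queue would not trigger a redirection.
 */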

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as eligible for aggregation at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.action = SCD_CFG_DISABLE_QUEUE,
		};
		u8 txq_curr_ac;

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		spin_lock_bh(&mvm->queue_info_lock);
		txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
		cmd.tid = mvm->queue_info[queue].txq_tid;
		spin_unlock_bh(&mvm->queue_info_lock);

		/* Disable the queue */
		if (disable_agg_tids)
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}

		/* If TXQ is allocated to another STA, update removal in FW */
		if (cmd.sta_id != mvmsta->sta_id)
			iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	s8 sta_id;
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
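
/*
 * Quick reference (editor's addition): data TIDs 0-7 map through
 * tid_to_mac80211_ac[] (e.g. TID 0 -> AC_BE, TID 6 -> AC_VO), while the
 * pseudo-TID IWL_MAX_TID_COUNT used for non-QoS/MGMT traffic is pinned to
 * AC_VO above so management frames always ride the highest-priority queue.
 */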

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
| 1148 | |
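/*
 * Illustrative sketch (not driver code): the reservation policy above,
 * reduced to a pure decision function. find_free_in_range() is a
 * hypothetical stand-in for iwl_mvm_find_free_queue():
 *
 *	static int pick_queue(bool is_sta_vif, bool is_tdls,
 *			      bool bss_queue_free)
 *	{
 *		if (is_sta_vif && !is_tdls && bss_queue_free)
 *			return IWL_MVM_DQA_BSS_CLIENT_QUEUE;
 *		return find_free_in_range(IWL_MVM_DQA_MIN_DATA_QUEUE,
 *					  IWL_MVM_DQA_MAX_DATA_QUEUE);
 *	}
 *
 * i.e. a non-TDLS client on a station interface gets the dedicated BSS
 * client queue when that queue is completely free; everything else falls
 * back to the generic DQA data-queue range.
 */
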
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		cfg.tid = i;
		cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
		cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
				 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-mapping sta %d tid %d to queue %d\n",
				    mvm_sta->sta_id, i, txq_id);

		iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
				   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
				   &cfg, wdg_timeout);

		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
	}

	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

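/*
 * Note: iwl_mvm_realloc_queues_after_restart() is called from
 * iwl_mvm_add_sta() below on the HW-restart path, so the queues are
 * re-mapped before the ADD_STA command is resent to the firmware.
 */
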
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* On HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * in which case they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

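/*
 * Note the ordering in iwl_mvm_add_sta() above: fw_id_to_mac_id[] is only
 * rcu_assign_pointer()'d after the firmware command succeeds, so RCU
 * readers never observe a station the firmware doesn't know about yet.
 */
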
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

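/*
 * Illustrative sketch (not driver code): ADD_STA responses pack extra
 * flags (e.g. the BAID bits used further below) into the same status
 * word, so every caller masks with IWL_ADD_STA_STATUS_MASK before
 * switching on the result. A simplified model of the pattern:
 *
 *	u32 code = status & IWL_ADD_STA_STATUS_MASK;
 *
 *	switch (code) {
 *	case ADD_STA_SUCCESS:
 *		break;			// command accepted
 *	default:
 *		return -EIO;		// anything else is a firmware error
 *	}
 */
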
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only that: if
	 * this work ran concurrently with iwl_mvm_rm_sta, it could run before
	 * iwl_mvm_rm_sta marks the station as busy and simply exit.
	 * iwl_mvm_rm_sta would then mark the station as busy, and nobody
	 * would ever clean it up.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}

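/*
 * Note: iwl_mvm_disable_sta_queues() above resets each txq_id back to
 * IEEE80211_INVAL_HW_QUEUE, marking the TID as unallocated again, so any
 * subsequent teardown path skips queues that were already disabled.
 */
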
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm))
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

		/* If there is a TXQ still marked as reserved - free it */
		if (iwl_mvm_is_dqa_supported(mvm) &&
		    mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
			u8 reserved_txq = mvm_sta->reserved_queue;
			enum iwl_mvm_queue_status *status;

			/*
			 * If no traffic has gone through the reserved TXQ - it
			 * is still marked as IWL_MVM_QUEUE_RESERVED, and
			 * should be manually marked as free again
			 */
			spin_lock_bh(&mvm->queue_info_lock);
			status = &mvm->queue_info[reserved_txq].status;
			if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
				 (*status != IWL_MVM_QUEUE_FREE),
				 "sta_id %d reserved txq %d status %d",
				 mvm_sta->sta_id, reserved_txq, *status)) {
				spin_unlock_bh(&mvm->queue_info_lock);
				return -EINVAL;
			}

			*status = IWL_MVM_QUEUE_FREE;
			spin_unlock_bh(&mvm->queue_info_lock);
		}

		if (vif->type == NL80211_IFTYPE_STATION &&
		    mvmvif->ap_sta_id == mvm_sta->sta_id) {
			/* if associated - we can't remove the AP STA now */
			if (vif->bss_conf.assoc)
				return ret;

			/* unassoc - go ahead - remove the AP STA now */
			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

			/* clear d0i3_ap_sta_id if no longer relevant */
			if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
		}
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
				mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}

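/*
 * Note: iwl_mvm_rm_sta() above thus has two exit flows - if frames are
 * still pending, the entry is parked as ERR_PTR(-EBUSY) and the actual
 * REMOVE_STA is deferred to iwl_mvm_sta_drained_wk() earlier in this
 * file; otherwise the station is removed from the firmware and the RCU
 * pointer is cleared immediately.
 */
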
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_is_dqa_supported(mvm))
		iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_MCAST,
			.sta_id = mvm->aux_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
				   wdg_timeout);
	}

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}

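/*
 * Note the DQA split in iwl_mvm_add_aux_sta() above: in non-DQA mode the
 * aux queue is mapped to its fifo before the station exists, while in DQA
 * mode iwl_mvm_enable_txq() needs the allocated aux station's sta_id in
 * the scheduler config, so it runs after iwl_mvm_allocate_int_sta().
 */
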
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_VO,
			.sta_id = mvmvif->bcast_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};
		unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
		int queue;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (mvmvif->bcast_sta.tfd_queue_msk &
		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
			 (mvmvif->bcast_sta.tfd_queue_msk &
			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
			return -EINVAL;

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
				   wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		qmask = iwl_mvm_mac_get_queues_mask(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included.
		 */
		qmask &= ~BIT(vif->cab_queue);

		if (iwl_mvm_is_dqa_supported(mvm))
			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}

/*
 * Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

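/*
 * Note: .metadata.sync = 1 above makes the delBA notification a blocking
 * sync with the RX queues - see the "synchronize all rx queues so we can
 * safely delete" step in iwl_mvm_sta_rx_agg() below, which relies on this
 * before freeing the reorder data.
 */
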
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}

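/*
 * Illustrative sketch (not driver code): each RX queue gets its own
 * reorder buffer of buf_size skb lists. A minimal model of the per-slot
 * indexing, with hypothetical names (slot_for):
 *
 *	int slot_for(struct iwl_mvm_reorder_buffer *buf, u16 sn)
 *	{
 *		return sn % buf->buf_size;	// one skb list per slot
 *	}
 *
 * Frames at head_sn are released in order; frames ahead of head_sn are
 * parked in their slot until the hole is filled or the reorder timer
 * fires.
 */
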
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		setup_timer(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired,
			    (unsigned long)&mvm->baid_map[baid]);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism times out (not that it's
		 * supposed to happen) and we would free the session data
		 * while RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

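/*
 * Note: the BA session timer above is armed to twice the negotiated
 * timeout (TU_TO_EXP_TIME(timeout * 2)), with last_rx recording activity,
 * so a session only expires after a full quiet period with margin.
 */
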
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
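
/*
 * Handle mac80211's request to start a TX BA session: reserve or reuse a
 * TX queue for the TID, record the starting sequence number, and either
 * complete the ADDBA immediately or wait for the queue to drain first.
 */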
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *     one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as
	 *     in non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
		   mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
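
/*
 * Move a TX BA session into the operational phase: (re)configure the
 * scheduler queue for aggregation with the negotiated window size, enable
 * aggregation in the firmware via ADD_STA, and update the rate-scaling
 * frame limit accordingly.
 */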
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
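
/*
 * Tear down a TX BA session cleanly, deferring the actual stop until the
 * hardware queue has been emptied if frames are still pending.
 */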
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;

	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
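
/*
 * Forcefully stop a TX BA session: mark it off immediately, then drain and
 * flush the aggregation queue rather than waiting for it to empty.
 */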
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}
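
/* Find a free offset in the firmware's key table. */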
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
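
/*
 * Resolve the station a key operation applies to; for GTKs on a station
 * interface this is the AP's station entry.
 */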
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
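
/*
 * Build and send an ADD_STA_KEY command installing a unicast or multicast
 * key, encoding the key material and flags according to the cipher.
 */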
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
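
/*
 * Install or remove an IGTK (management group key) via the MGMT_MCAST_KEY
 * command, using the older v1 command layout on firmware without the new
 * RX API.
 */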
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
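
/*
 * Return the MAC address to derive the TKIP phase-1 key for; for GTKs on
 * a station interface this is the AP station's address.
 */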
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
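
/*
 * Program one key into the firmware for the given multicast/unicast role,
 * dispatching on the cipher; TKIP additionally needs the phase-1 key
 * derived from the receive sequence counter.
 */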
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}
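
/* Invalidate a key offset in the firmware's key table for this station. */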
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
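
/*
 * Top-level key installation: resolve the target station, pick (or reuse)
 * a firmware key offset, and upload WEP keys twice so the same slot serves
 * both the multicast and unicast key entries.
 */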
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
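
/*
 * Top-level key removal: free the firmware key offset, update the deletion
 * counters used by iwl_mvm_set_fw_key_idx(), and invalidate the key in the
 * firmware if the station still exists.
 */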
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
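
/*
 * Push an updated TKIP phase-1 key to the firmware when the IV32 rolls
 * over; this runs under RCU, hence the asynchronous command.
 */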
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}
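
/* Tell the firmware that a station woke up from power save. */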
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
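
/*
 * Allow a sleeping station a limited number of frames (service period
 * release): convert the TIDs to ACs, clamp the count to what is actually
 * queued on aggregation queues, and send the sleep state to the firmware.
 */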
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if
	 * all the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
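
/* Firmware notification that a service period ended; forward to mac80211. */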
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

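/*
 * Tell the firmware to stop or resume transmitting frames to the given
 * station by toggling STA_FLG_DISABLE_TX in an asynchronous ADD_STA
 * command.
 */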
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

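/*
 * Same as iwl_mvm_sta_modify_disable_tx(), but also keeps mac80211 in
 * sync: remember the new state in the station data and ask mac80211 to
 * buffer or release the station's frames accordingly.
 */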
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

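/*
 * Block or unblock Tx for all the stations that belong to the given
 * vif, matching them by the interface's firmware id/color.
 */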
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

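/*
 * Presumably called around a channel switch while the peer cannot be
 * reached: disable Tx to the AP station of this vif in the firmware.
 */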
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}