/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * A new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
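
/*
 * Illustrative note (not part of the original file): callers are expected to
 * pass the size returned above together with the command payload, e.g.
 *
 *	status = ADD_STA_SUCCESS;
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *					  iwl_mvm_add_sta_cmd_size(mvm),
 *					  &cmd, &status);
 *
 * so that firmware using the older v7 layout only ever receives the shorter
 * structure. This mirrors how iwl_mvm_sta_send_to_fw() below uses the helper.
 */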

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->associated)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

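/*
 * Illustrative example (not part of the original file) of the priority order
 * implemented in iwl_mvm_get_shared_queue() above: a station whose only DATA
 * queue so far serves AC_VI and that now needs a queue for an AC_VO TID will
 * share that AC_VI queue (rule 3a); if the station instead already had an
 * AC_BE queue, the AC_BE queue would always be chosen first (rule 1).
 */
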
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

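/*
 * Illustrative note (not part of the original file): because enum
 * ieee80211_ac_numbers is inverted (IEEE80211_AC_VO == 0 ... IEEE80211_AC_BK
 * == 3), the "ac <= mac80211_ac" check in iwl_mvm_scd_queue_redirect() above
 * means "the TID's AC is at least as high-priority as the queue's current
 * AC". For example, a BK TID arriving on a queue currently marked AC_VI does
 * trigger a redirect to the BK FIFO, while a VO TID on that queue does not.
 */
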
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
Sara Sharon34e10862017-02-23 13:15:07 +02001162 for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001163 bool reconfig;
Liad Kaufman19aefa42016-03-08 14:29:51 +02001164 bool change_owner;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001165
1166 spin_lock_bh(&mvm->queue_info_lock);
1167 reconfig = (mvm->queue_info[queue].status ==
1168 IWL_MVM_QUEUE_RECONFIGURING);
Liad Kaufman19aefa42016-03-08 14:29:51 +02001169
1170 /*
1171 * We need to take into account a situation in which a TXQ was
1172 * allocated to TID x, and then turned shared by adding TIDs y
1173 * and z. If TID x becomes inactive and is removed from the TXQ,
1174 * ownership must be given to one of the remaining TIDs.
1175 * This is mainly because if TID x continues - a new queue can't
1176 * be allocated for it as long as it is an owner of another TXQ.
1177 */
1178 change_owner = !(mvm->queue_info[queue].tid_bitmap &
1179 BIT(mvm->queue_info[queue].txq_tid)) &&
1180 (mvm->queue_info[queue].status ==
1181 IWL_MVM_QUEUE_SHARED);
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001182 spin_unlock_bh(&mvm->queue_info_lock);
1183
1184 if (reconfig)
1185 iwl_mvm_unshare_queue(mvm, queue);
Liad Kaufman19aefa42016-03-08 14:29:51 +02001186 else if (change_owner)
1187 iwl_mvm_change_queue_owner(mvm, queue);
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001188 }
1189
Sara Sharon34e10862017-02-23 13:15:07 +02001190alloc_queues:
Liad Kaufman24afba72015-07-28 18:56:08 +03001191 /* Go over all stations with deferred traffic */
1192 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1193 IWL_MVM_STATION_COUNT) {
1194 clear_bit(sta_id, mvm->sta_deferred_frames);
1195 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1196 lockdep_is_held(&mvm->mutex));
1197 if (IS_ERR_OR_NULL(sta))
1198 continue;
1199
1200 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1201 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1202
1203 for_each_set_bit(tid, &deferred_tid_traffic,
1204 IWL_MAX_TID_COUNT + 1)
1205 iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1206 }
1207
1208 mutex_unlock(&mvm->mutex);
1209}
1210
1211static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001212 struct ieee80211_sta *sta,
1213 enum nl80211_iftype vif_type)
Liad Kaufman24afba72015-07-28 18:56:08 +03001214{
1215 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1216 int queue;
Sara Sharon01796ff2016-11-16 17:04:36 +02001217 bool using_inactive_queue = false, same_sta = false;
Liad Kaufman24afba72015-07-28 18:56:08 +03001218
Sara Sharon396952e2017-02-22 19:40:55 +02001219 /* queue reserving is disabled on new TX path */
1220 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1221 return 0;
1222
Liad Kaufman9794c642015-08-19 17:34:28 +03001223 /*
1224 * Check for inactive queues, so we don't reach a situation where we
1225 * can't add a STA due to a shortage in queues that doesn't really exist
1226 */
1227 iwl_mvm_inactivity_check(mvm);
1228
Liad Kaufman24afba72015-07-28 18:56:08 +03001229 spin_lock_bh(&mvm->queue_info_lock);
1230
1231 /* Make sure we have free resources for this STA */
Liad Kaufmand5216a22015-08-09 15:50:51 +03001232 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1233 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
Liad Kaufmancf961e12015-08-13 19:16:08 +03001234 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1235 IWL_MVM_QUEUE_FREE))
Liad Kaufmand5216a22015-08-09 15:50:51 +03001236 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1237 else
Liad Kaufman9794c642015-08-19 17:34:28 +03001238 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1239 IWL_MVM_DQA_MIN_DATA_QUEUE,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001240 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufman24afba72015-07-28 18:56:08 +03001241 if (queue < 0) {
1242 spin_unlock_bh(&mvm->queue_info_lock);
1243 IWL_ERR(mvm, "No available queues for new station\n");
1244 return -ENOSPC;
Sara Sharon01796ff2016-11-16 17:04:36 +02001245 } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
1246 /*
1247 * If this queue is already allocated but inactive we'll need to
1248 * first free this queue before enabling it again, we'll mark
1249 * it as reserved to make sure no new traffic arrives on it
1250 */
1251 using_inactive_queue = true;
1252 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
Liad Kaufman24afba72015-07-28 18:56:08 +03001253 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03001254 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman24afba72015-07-28 18:56:08 +03001255
1256 spin_unlock_bh(&mvm->queue_info_lock);
1257
1258 mvmsta->reserved_queue = queue;
1259
Sara Sharon01796ff2016-11-16 17:04:36 +02001260 if (using_inactive_queue)
1261 iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
1262
Liad Kaufman24afba72015-07-28 18:56:08 +03001263 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1264 queue, mvmsta->sta_id);
1265
1266 return 0;
1267}
1268
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001269/*
1270 * In DQA mode, after a HW restart the queues should be allocated as before, in
1271 * order to avoid race conditions when there are shared queues. This function
1272 * does the re-mapping and queue allocation.
1273 *
1274 * Note that re-enabling aggregations isn't done in this function.
1275 */
1276static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1277 struct iwl_mvm_sta *mvm_sta)
1278{
1279 unsigned int wdg_timeout =
1280 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1281 int i;
1282 struct iwl_trans_txq_scd_cfg cfg = {
1283 .sta_id = mvm_sta->sta_id,
1284 .frame_limit = IWL_FRAME_LIMIT,
1285 };
1286
Johannes Berg03c902b2016-12-02 12:03:36 +01001287 /* Make sure reserved queue is still marked as such (if allocated) */
1288 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1289 mvm->queue_info[mvm_sta->reserved_queue].status =
1290 IWL_MVM_QUEUE_RESERVED;
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001291
1292 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1293 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1294 int txq_id = tid_data->txq_id;
1295 int ac;
1296 u8 mac_queue;
1297
Sara Sharon6862fce2017-02-22 19:34:17 +02001298 if (txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001299 continue;
1300
1301 skb_queue_head_init(&tid_data->deferred_tx_frames);
1302
1303 ac = tid_to_mac80211_ac[i];
1304 mac_queue = mvm_sta->vif->hw_queue[ac];
1305
Sara Sharon310181e2017-01-17 14:27:48 +02001306 if (iwl_mvm_has_new_tx_api(mvm)) {
1307 IWL_DEBUG_TX_QUEUES(mvm,
1308 "Re-mapping sta %d tid %d\n",
1309 mvm_sta->sta_id, i);
1310 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1311 mvm_sta->sta_id,
1312 i, wdg_timeout);
1313 tid_data->txq_id = txq_id;
1314 } else {
1315 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001316
Sara Sharon310181e2017-01-17 14:27:48 +02001317 cfg.tid = i;
1318 cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
1319 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1320 txq_id ==
1321 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001322
Sara Sharon310181e2017-01-17 14:27:48 +02001323 IWL_DEBUG_TX_QUEUES(mvm,
1324 "Re-mapping sta %d tid %d to queue %d\n",
1325 mvm_sta->sta_id, i, txq_id);
1326
1327 iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1328 wdg_timeout);
Sara Sharon34e10862017-02-23 13:15:07 +02001329 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
Sara Sharon310181e2017-01-17 14:27:48 +02001330 }
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001331 }
1332
1333 atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
1334}
1335
Johannes Berg8ca151b2013-01-24 14:25:36 +01001336int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1337 struct ieee80211_vif *vif,
1338 struct ieee80211_sta *sta)
1339{
1340 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001341 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharona571f5f2015-12-07 12:50:58 +02001342 struct iwl_mvm_rxq_dup_data *dup_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001343 int i, ret, sta_id;
1344
1345 lockdep_assert_held(&mvm->mutex);
1346
1347 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
Eliad Pellerb92e6612014-01-23 17:58:23 +02001348 sta_id = iwl_mvm_find_free_sta_id(mvm,
1349 ieee80211_vif_type_p2p(vif));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001350 else
1351 sta_id = mvm_sta->sta_id;
1352
Sara Sharon0ae98812017-01-04 14:53:58 +02001353 if (sta_id == IWL_MVM_INVALID_STA)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001354 return -ENOSPC;
1355
1356 spin_lock_init(&mvm_sta->lock);
1357
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001358 /* In DQA mode, if this is a HW restart, re-alloc existing queues */
1359 if (iwl_mvm_is_dqa_supported(mvm) &&
1360 test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1361 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
1362 goto update_fw;
1363 }
1364
Johannes Berg8ca151b2013-01-24 14:25:36 +01001365 mvm_sta->sta_id = sta_id;
1366 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1367 mvmvif->color);
1368 mvm_sta->vif = vif;
Liad Kaufmana58bb462017-05-28 14:20:04 +03001369 if (!mvm->trans->cfg->gen2)
1370 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1371 else
1372 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001373 mvm_sta->tx_protection = 0;
1374 mvm_sta->tt_tx_protection = false;
Sara Sharonced19f22017-02-06 19:09:32 +02001375 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001376
1377 /* HW restart, don't assume the memory has been zeroed */
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001378 atomic_set(&mvm->pending_frames[sta_id], 0);
Liad Kaufman69191af2015-09-01 18:50:22 +03001379 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001380 mvm_sta->tfd_queue_msk = 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001381
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001382 /*
1383	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
1384	 * in which case they'll be allocated dynamically.
1385 */
1386 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001387 ret = iwl_mvm_tdls_sta_init(mvm, sta);
1388 if (ret)
1389 return ret;
Liad Kaufman24afba72015-07-28 18:56:08 +03001390 } else if (!iwl_mvm_is_dqa_supported(mvm)) {
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001391 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1392 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
1393 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
1394 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001395
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001396 /* for HW restart - reset everything but the sequence number */
Liad Kaufman24afba72015-07-28 18:56:08 +03001397 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001398 u16 seq = mvm_sta->tid_data[i].seq_number;
1399 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1400 mvm_sta->tid_data[i].seq_number = seq;
Liad Kaufman24afba72015-07-28 18:56:08 +03001401
1402 if (!iwl_mvm_is_dqa_supported(mvm))
1403 continue;
1404
1405 /*
1406 * Mark all queues for this STA as unallocated and defer TX
1407 * frames until the queue is allocated
1408 */
Sara Sharon6862fce2017-02-22 19:34:17 +02001409 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001410 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001411 }
Liad Kaufman24afba72015-07-28 18:56:08 +03001412 mvm_sta->deferred_traffic_tid_map = 0;
Eyal Shapiraefed6642014-09-14 15:58:53 +03001413 mvm_sta->agg_tids = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001414
Sara Sharona571f5f2015-12-07 12:50:58 +02001415 if (iwl_mvm_has_new_rx_api(mvm) &&
1416 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Johannes Berg92c4dca2017-06-07 10:35:54 +02001417 int q;
1418
Sara Sharona571f5f2015-12-07 12:50:58 +02001419 dup_data = kcalloc(mvm->trans->num_rx_queues,
Johannes Berg92c4dca2017-06-07 10:35:54 +02001420 sizeof(*dup_data), GFP_KERNEL);
Sara Sharona571f5f2015-12-07 12:50:58 +02001421 if (!dup_data)
1422 return -ENOMEM;
Johannes Berg92c4dca2017-06-07 10:35:54 +02001423 /*
1424 * Initialize all the last_seq values to 0xffff which can never
1425 * compare equal to the frame's seq_ctrl in the check in
1426 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1427 * number and fragmented packets don't reach that function.
1428 *
1429 * This thus allows receiving a packet with seqno 0 and the
1430 * retry bit set as the very first packet on a new TID.
1431 */
1432 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1433 memset(dup_data[q].last_seq, 0xff,
1434 sizeof(dup_data[q].last_seq));
Sara Sharona571f5f2015-12-07 12:50:58 +02001435 mvm_sta->dup_data = dup_data;
1436 }
1437
Sara Sharon396952e2017-02-22 19:40:55 +02001438 if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufmand5216a22015-08-09 15:50:51 +03001439 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1440 ieee80211_vif_type_p2p(vif));
Liad Kaufman24afba72015-07-28 18:56:08 +03001441 if (ret)
1442 goto err;
1443 }
1444
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001445update_fw:
Liad Kaufman24afba72015-07-28 18:56:08 +03001446 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001447 if (ret)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001448 goto err;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001449
Johannes Berg9e848012014-08-04 14:33:42 +02001450 if (vif->type == NL80211_IFTYPE_STATION) {
1451 if (!sta->tdls) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001452 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001453 mvmvif->ap_sta_id = sta_id;
1454 } else {
Sara Sharon0ae98812017-01-04 14:53:58 +02001455 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001456 }
1457 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001458
1459 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1460
1461 return 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001462
1463err:
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001464 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
1465 iwl_mvm_tdls_sta_deinit(mvm, sta);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001466 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001467}
1468
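/*
 * Tell the firmware to start or stop draining frames for this station by
 * toggling the STA_FLG_DRAIN_FLOW flag through an ADD_STA (modify) command.
 */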
1469int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1470 bool drain)
1471{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001472 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001473 int ret;
1474 u32 status;
1475
1476 lockdep_assert_held(&mvm->mutex);
1477
1478 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1479 cmd.sta_id = mvmsta->sta_id;
1480 cmd.add_modify = STA_MODE_MODIFY;
1481 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1482 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1483
1484 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001485 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1486 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001487 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001488 if (ret)
1489 return ret;
1490
Sara Sharon837c4da2016-01-07 16:50:45 +02001491 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001492 case ADD_STA_SUCCESS:
1493		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1494 mvmsta->sta_id);
1495 break;
1496 default:
1497 ret = -EIO;
1498 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1499 mvmsta->sta_id);
1500 break;
1501 }
1502
1503 return ret;
1504}
1505
1506/*
1507 * Remove a station from the FW table. Before sending the command to remove
1508 * the station validate that the station is indeed known to the driver (sanity
1509 * only).
1510 */
1511static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1512{
1513 struct ieee80211_sta *sta;
1514 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1515 .sta_id = sta_id,
1516 };
1517 int ret;
1518
1519 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1520 lockdep_is_held(&mvm->mutex));
1521
1522 /* Note: internal stations are marked as error values */
1523 if (!sta) {
1524 IWL_ERR(mvm, "Invalid station id\n");
1525 return -EINVAL;
1526 }
1527
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001528 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001529 sizeof(rm_sta_cmd), &rm_sta_cmd);
1530 if (ret) {
1531 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1532 return ret;
1533 }
1534
1535 return 0;
1536}
1537
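/*
 * Worker that finishes the removal of stations whose frames were still being
 * drained: once drained, remove them from the FW, clear their RCU pointers
 * and disable any TDLS queues that were kept around for the drain.
 */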
1538void iwl_mvm_sta_drained_wk(struct work_struct *wk)
1539{
1540 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
1541 u8 sta_id;
1542
1543 /*
1544	 * The mutex is needed because of the SYNC cmd, but not only that: if
1545	 * this work ran concurrently with iwl_mvm_rm_sta, it could run before
1546	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
1547	 * iwl_mvm_rm_sta would set the station as busy, and nobody would
1548	 * clean that up later.
1549 */
1550 mutex_lock(&mvm->mutex);
1551
1552 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
1553 int ret;
1554 struct ieee80211_sta *sta =
1555 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1556 lockdep_is_held(&mvm->mutex));
1557
Johannes Berg1ddbbb02013-12-04 22:39:17 +01001558 /*
1559 * This station is in use or RCU-removed; the latter happens in
1560 * managed mode, where mac80211 removes the station before we
1561 * can remove it from firmware (we can only do that after the
1562 * MAC is marked unassociated), and possibly while the deauth
1563 * frame to disconnect from the AP is still queued. Then, the
1564 * station pointer is -ENOENT when the last skb is reclaimed.
1565 */
1566 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001567 continue;
1568
1569 if (PTR_ERR(sta) == -EINVAL) {
1570 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
1571 sta_id);
1572 continue;
1573 }
1574
1575 if (!sta) {
1576 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
1577 sta_id);
1578 continue;
1579 }
1580
1581 WARN_ON(PTR_ERR(sta) != -EBUSY);
1582 /* This station was removed and we waited until it got drained,
1583 * we can now proceed and remove it.
1584 */
1585 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1586 if (ret) {
1587 IWL_ERR(mvm,
1588 "Couldn't remove sta %d after it was drained\n",
1589 sta_id);
1590 continue;
1591 }
Monam Agarwalc531c772014-03-24 00:05:56 +05301592 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001593 clear_bit(sta_id, mvm->sta_drained);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001594
1595 if (mvm->tfd_drained[sta_id]) {
1596 unsigned long i, msk = mvm->tfd_drained[sta_id];
1597
Emmanuel Grumbacha4ca3ed2015-01-20 17:07:10 +02001598 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
Arik Nemtsov06ecdba2015-10-12 14:47:11 +03001599 iwl_mvm_disable_txq(mvm, i, i,
1600 IWL_MAX_TID_COUNT, 0);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001601
1602 mvm->tfd_drained[sta_id] = 0;
1603 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
1604 sta_id, msk);
1605 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001606 }
1607
1608 mutex_unlock(&mvm->mutex);
1609}
1610
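/*
 * Disable every TX queue still mapped to one of this station's TIDs and mark
 * those TIDs' queues as invalid again.
 */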
Liad Kaufman24afba72015-07-28 18:56:08 +03001611static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1612 struct ieee80211_vif *vif,
1613 struct iwl_mvm_sta *mvm_sta)
1614{
1615 int ac;
1616 int i;
1617
1618 lockdep_assert_held(&mvm->mutex);
1619
1620 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
Sara Sharon6862fce2017-02-22 19:34:17 +02001621 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman24afba72015-07-28 18:56:08 +03001622 continue;
1623
1624 ac = iwl_mvm_tid_to_ac_queue(i);
1625 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1626 vif->hw_queue[ac], i, 0);
Sara Sharon6862fce2017-02-22 19:34:17 +02001627 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001628 }
1629}
1630
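/*
 * Wait until the transport has emptied all TX queues currently assigned to
 * this station's TIDs; returns the first error reported by the transport.
 */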
Sara Sharond6d517b2017-03-06 10:16:11 +02001631int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1632 struct iwl_mvm_sta *mvm_sta)
1633{
1634	int i, ret = 0;
1635
1636 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1637 u16 txq_id;
1638
1639 spin_lock_bh(&mvm_sta->lock);
1640 txq_id = mvm_sta->tid_data[i].txq_id;
1641 spin_unlock_bh(&mvm_sta->lock);
1642
1643 if (txq_id == IWL_MVM_INVALID_QUEUE)
1644 continue;
1645
1646 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1647 if (ret)
1648 break;
1649 }
1650
1651 return ret;
1652}
1653
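/*
 * Remove a station: drain and flush its frames, wait for its TX queues to
 * empty, release any DQA queues and reservations, and finally remove the
 * station from the firmware and from the driver's data structures.
 */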
Johannes Berg8ca151b2013-01-24 14:25:36 +01001654int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1655 struct ieee80211_vif *vif,
1656 struct ieee80211_sta *sta)
1657{
1658 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001659 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001660 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001661 int ret;
1662
1663 lockdep_assert_held(&mvm->mutex);
1664
Sara Sharona571f5f2015-12-07 12:50:58 +02001665 if (iwl_mvm_has_new_rx_api(mvm))
1666 kfree(mvm_sta->dup_data);
1667
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001668 if ((vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon94c3e612016-12-07 15:04:37 +02001669 mvmvif->ap_sta_id == sta_id) ||
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001670 iwl_mvm_is_dqa_supported(mvm)){
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001671 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1672 if (ret)
1673 return ret;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001674 /* flush its queues here since we are freeing mvm_sta */
Sara Sharond49394a2017-03-05 13:01:08 +02001675 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001676 if (ret)
1677 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001678 if (iwl_mvm_has_new_tx_api(mvm)) {
1679 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1680 } else {
1681 u32 q_mask = mvm_sta->tfd_queue_msk;
1682
1683 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1684 q_mask);
1685 }
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001686 if (ret)
1687 return ret;
1688 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001689
Liad Kaufman24afba72015-07-28 18:56:08 +03001690 /* If DQA is supported - the queues can be disabled now */
Sara Sharon94c3e612016-12-07 15:04:37 +02001691 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman56214742016-09-22 15:14:08 +03001692 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001693 /*
1694			 * If pending_frames is set at this point - it must be a
1695			 * driver internal logic error, since the queues are empty
1696			 * and were removed successfully.
1697			 * Warn on it, but set it to 0 anyway to avoid the station
1698			 * not being removed later in the function.
1699 */
1700 WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
1701 }
Liad Kaufman56214742016-09-22 15:14:08 +03001702
1703 /* If there is a TXQ still marked as reserved - free it */
1704 if (iwl_mvm_is_dqa_supported(mvm) &&
1705 mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001706 u8 reserved_txq = mvm_sta->reserved_queue;
1707 enum iwl_mvm_queue_status *status;
1708
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001709 /*
1710 * If no traffic has gone through the reserved TXQ - it
1711 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1712 * should be manually marked as free again
1713 */
1714 spin_lock_bh(&mvm->queue_info_lock);
1715 status = &mvm->queue_info[reserved_txq].status;
1716 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1717 (*status != IWL_MVM_QUEUE_FREE),
1718 "sta_id %d reserved txq %d status %d",
Sara Sharon94c3e612016-12-07 15:04:37 +02001719 sta_id, reserved_txq, *status)) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001720 spin_unlock_bh(&mvm->queue_info_lock);
1721 return -EINVAL;
1722 }
1723
1724 *status = IWL_MVM_QUEUE_FREE;
1725 spin_unlock_bh(&mvm->queue_info_lock);
1726 }
1727
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001728 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon94c3e612016-12-07 15:04:37 +02001729 mvmvif->ap_sta_id == sta_id) {
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001730 /* if associated - we can't remove the AP STA now */
1731 if (vif->bss_conf.assoc)
1732 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001733
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001734 /* unassoc - go ahead - remove the AP STA now */
Sara Sharon0ae98812017-01-04 14:53:58 +02001735 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
Eliad Peller37577fe2013-12-05 17:19:39 +02001736
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001737 /* clear d0i3_ap_sta_id if no longer relevant */
Sara Sharon94c3e612016-12-07 15:04:37 +02001738 if (mvm->d0i3_ap_sta_id == sta_id)
Sara Sharon0ae98812017-01-04 14:53:58 +02001739 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001740 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001741 }
1742
1743 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001744 * This shouldn't happen - the TDLS channel switch should be canceled
1745 * before the STA is removed.
1746 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001747 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001748 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001749 cancel_delayed_work(&mvm->tdls_cs.dwork);
1750 }
1751
1752 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001753 * Make sure that the tx response code sees the station as -EBUSY and
1754 * calls the drain worker.
1755 */
1756 spin_lock_bh(&mvm_sta->lock);
Sara Sharon94c3e612016-12-07 15:04:37 +02001757
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001758 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001759 * There are frames pending on the AC queues for this station.
1760 * We need to wait until all the frames are drained...
1761 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001762 if (atomic_read(&mvm->pending_frames[sta_id])) {
1763 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
Johannes Berg8ca151b2013-01-24 14:25:36 +01001764 ERR_PTR(-EBUSY));
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001765 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001766
1767 /* disable TDLS sta queues on drain complete */
1768 if (sta->tdls) {
Sara Sharon94c3e612016-12-07 15:04:37 +02001769 mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
1770 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001771 }
1772
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001773 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001774 } else {
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001775 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001776
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001777 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001778 iwl_mvm_tdls_sta_deinit(mvm, sta);
1779
Johannes Berg8ca151b2013-01-24 14:25:36 +01001780 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
Monam Agarwalc531c772014-03-24 00:05:56 +05301781 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001782 }
1783
1784 return ret;
1785}
1786
1787int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1788 struct ieee80211_vif *vif,
1789 u8 sta_id)
1790{
1791 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1792
1793 lockdep_assert_held(&mvm->mutex);
1794
Monam Agarwalc531c772014-03-24 00:05:56 +05301795 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001796 return ret;
1797}
1798
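/*
 * Allocate an internal (driver-managed) station: pick a free station ID
 * unless we're in HW restart, record its queue mask and type, and mark the
 * ID as used so that iterating over the stations won't stop on it.
 */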
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001799int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1800 struct iwl_mvm_int_sta *sta,
Sara Sharonced19f22017-02-06 19:09:32 +02001801 u32 qmask, enum nl80211_iftype iftype,
1802 enum iwl_sta_type type)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001803{
1804 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02001805 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Sara Sharon0ae98812017-01-04 14:53:58 +02001806 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001807 return -ENOSPC;
1808 }
1809
1810 sta->tfd_queue_msk = qmask;
Sara Sharonced19f22017-02-06 19:09:32 +02001811 sta->type = type;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001812
1813 /* put a non-NULL value so iterating over the stations won't stop */
1814 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1815 return 0;
1816}
1817
Sara Sharon26d6c162017-01-03 12:00:19 +02001818void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001819{
Monam Agarwalc531c772014-03-24 00:05:56 +05301820 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001821 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
Sara Sharon0ae98812017-01-04 14:53:58 +02001822 sta->sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001823}
1824
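/*
 * Send the ADD_STA command for an internal station (aux, sniffer, broadcast
 * or multicast) and check the status reported by the firmware.
 */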
1825static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1826 struct iwl_mvm_int_sta *sta,
1827 const u8 *addr,
1828 u16 mac_id, u16 color)
1829{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001830 struct iwl_mvm_add_sta_cmd cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001831 int ret;
1832 u32 status;
1833
1834 lockdep_assert_held(&mvm->mutex);
1835
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001836 memset(&cmd, 0, sizeof(cmd));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001837 cmd.sta_id = sta->sta_id;
1838 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1839 color));
Sara Sharonced19f22017-02-06 19:09:32 +02001840 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1841 cmd.station_type = sta->type;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001842
Sara Sharonbb497012016-09-29 14:52:40 +03001843 if (!iwl_mvm_has_new_tx_api(mvm))
1844 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
Liad Kaufmancf0cda12015-09-24 10:44:12 +02001845 cmd.tid_disable_tx = cpu_to_le16(0xffff);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001846
1847 if (addr)
1848 memcpy(cmd.addr, addr, ETH_ALEN);
1849
Sara Sharon854c5702016-01-26 13:17:47 +02001850 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1851 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001852 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001853 if (ret)
1854 return ret;
1855
Sara Sharon837c4da2016-01-07 16:50:45 +02001856 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001857 case ADD_STA_SUCCESS:
1858 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1859 return 0;
1860 default:
1861 ret = -EIO;
1862 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1863 status);
1864 break;
1865 }
1866 return ret;
1867}
1868
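/*
 * Enable the auxiliary TX queue: via TVQM on the new TX API, via an SCD queue
 * config in DQA mode, or via the legacy AC queue enable otherwise.
 */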
Sara Sharonc5a719e2016-11-15 10:20:48 +02001869static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001870{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001871 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1872 mvm->cfg->base_params->wd_timeout :
1873 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001874
Sara Sharon310181e2017-01-17 14:27:48 +02001875 if (iwl_mvm_has_new_tx_api(mvm)) {
1876 int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
1877 mvm->aux_sta.sta_id,
1878 IWL_MAX_TID_COUNT,
1879 wdg_timeout);
1880 mvm->aux_queue = queue;
1881 } else if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman28d07932015-09-01 16:36:25 +03001882 struct iwl_trans_txq_scd_cfg cfg = {
1883 .fifo = IWL_MVM_TX_FIFO_MCAST,
1884 .sta_id = mvm->aux_sta.sta_id,
1885 .tid = IWL_MAX_TID_COUNT,
1886 .aggregate = false,
1887 .frame_limit = IWL_FRAME_LIMIT,
1888 };
1889
1890 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1891 wdg_timeout);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001892 } else {
1893 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1894 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
Liad Kaufman28d07932015-09-01 16:36:25 +03001895 }
Sara Sharonc5a719e2016-11-15 10:20:48 +02001896}
1897
1898int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1899{
1900 int ret;
1901
1902 lockdep_assert_held(&mvm->mutex);
1903
1904 /* Allocate aux station and assign to it the aux queue */
1905 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Sara Sharonced19f22017-02-06 19:09:32 +02001906 NL80211_IFTYPE_UNSPECIFIED,
1907 IWL_STA_AUX_ACTIVITY);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001908 if (ret)
1909 return ret;
1910
1911 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1912 if (!iwl_mvm_has_new_tx_api(mvm))
1913 iwl_mvm_enable_aux_queue(mvm);
Liad Kaufman28d07932015-09-01 16:36:25 +03001914
Johannes Berg8ca151b2013-01-24 14:25:36 +01001915 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1916 MAC_INDEX_AUX, 0);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001917 if (ret) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001918 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001919 return ret;
1920 }
1921
1922 /*
1923 * For a000 firmware and on we cannot add queue to a station unknown
1924 * to firmware so enable queue here - after the station was added
1925 */
1926 if (iwl_mvm_has_new_tx_api(mvm))
1927 iwl_mvm_enable_aux_queue(mvm);
1928
1929 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001930}
1931
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001932int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1933{
1934 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1935
1936 lockdep_assert_held(&mvm->mutex);
1937 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1938 mvmvif->id, 0);
1939}
1940
1941int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1942{
1943 int ret;
1944
1945 lockdep_assert_held(&mvm->mutex);
1946
1947 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1948 if (ret)
1949 IWL_WARN(mvm, "Failed sending remove station\n");
1950
1951 return ret;
1952}
1953
1954void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1955{
1956 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1957}
1958
Johannes Berg712b24a2014-08-04 14:14:14 +02001959void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1960{
1961 lockdep_assert_held(&mvm->mutex);
1962
1963 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1964}
1965
Johannes Berg8ca151b2013-01-24 14:25:36 +01001966/*
1967 * Send the add station command for the vif's broadcast station.
1968 * Assumes that the station was already allocated.
1969 *
1970 * @mvm: the mvm component
1971 * @vif: the interface to which the broadcast station is added
1972 * The broadcast station to add is taken from the vif (mvmvif->bcast_sta).
1973 */
Johannes Berg013290a2014-08-04 13:38:48 +02001974int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001975{
1976 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001977 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001978 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001979 const u8 *baddr = _baddr;
Johannes Berg7daa7622017-02-24 12:02:22 +01001980 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001981 int ret;
Sara Sharonc5a719e2016-11-15 10:20:48 +02001982 unsigned int wdg_timeout =
1983 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1984 struct iwl_trans_txq_scd_cfg cfg = {
1985 .fifo = IWL_MVM_TX_FIFO_VO,
1986 .sta_id = mvmvif->bcast_sta.sta_id,
1987 .tid = IWL_MAX_TID_COUNT,
1988 .aggregate = false,
1989 .frame_limit = IWL_FRAME_LIMIT,
1990 };
Johannes Berg8ca151b2013-01-24 14:25:36 +01001991
1992 lockdep_assert_held(&mvm->mutex);
1993
Sara Sharon310181e2017-01-17 14:27:48 +02001994 if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufman4d339982017-03-21 17:13:16 +02001995 if (vif->type == NL80211_IFTYPE_AP ||
1996 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon49f71712017-01-09 12:07:16 +02001997 queue = mvm->probe_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001998 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
Sara Sharon49f71712017-01-09 12:07:16 +02001999 queue = mvm->p2p_dev_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002000 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
Liad Kaufmande24f632015-08-04 15:19:18 +03002001 return -EINVAL;
2002
Liad Kaufmandf88c082016-11-24 15:31:00 +02002003 bsta->tfd_queue_msk |= BIT(queue);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002004
Sara Sharon310181e2017-01-17 14:27:48 +02002005 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
2006 &cfg, wdg_timeout);
Liad Kaufmande24f632015-08-04 15:19:18 +03002007 }
2008
Johannes Berg5023d962013-07-31 14:07:43 +02002009 if (vif->type == NL80211_IFTYPE_ADHOC)
2010 baddr = vif->bss_conf.bssid;
2011
Sara Sharon0ae98812017-01-04 14:53:58 +02002012 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002013 return -ENOSPC;
2014
Liad Kaufmandf88c082016-11-24 15:31:00 +02002015 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2016 mvmvif->id, mvmvif->color);
2017 if (ret)
2018 return ret;
2019
2020 /*
Sara Sharonc5a719e2016-11-15 10:20:48 +02002021 * For a000 firmware and on we cannot add queue to a station unknown
2022 * to firmware so enable queue here - after the station was added
Liad Kaufmandf88c082016-11-24 15:31:00 +02002023 */
Sara Sharon310181e2017-01-17 14:27:48 +02002024 if (iwl_mvm_has_new_tx_api(mvm)) {
Johannes Berg7daa7622017-02-24 12:02:22 +01002025 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
2026 bsta->sta_id,
2027 IWL_MAX_TID_COUNT,
2028 wdg_timeout);
2029
Luca Coelho7b758a12017-06-20 13:40:03 +03002030 if (vif->type == NL80211_IFTYPE_AP ||
2031 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon310181e2017-01-17 14:27:48 +02002032 mvm->probe_queue = queue;
2033 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2034 mvm->p2p_dev_queue = queue;
Sara Sharon310181e2017-01-17 14:27:48 +02002035 }
Liad Kaufmandf88c082016-11-24 15:31:00 +02002036
2037 return 0;
2038}
2039
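/*
 * Flush the broadcast station's frames and disable the TX queue it used:
 * the probe queue for AP/IBSS interfaces, or the P2P device queue for P2P
 * device interfaces.
 */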
2040static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2041 struct ieee80211_vif *vif)
2042{
2043 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002044 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002045
2046 lockdep_assert_held(&mvm->mutex);
2047
Sara Sharond49394a2017-03-05 13:01:08 +02002048 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2049
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002050 switch (vif->type) {
2051 case NL80211_IFTYPE_AP:
2052 case NL80211_IFTYPE_ADHOC:
2053 queue = mvm->probe_queue;
2054 break;
2055 case NL80211_IFTYPE_P2P_DEVICE:
2056 queue = mvm->p2p_dev_queue;
2057 break;
2058 default:
2059 WARN(1, "Can't free bcast queue on vif type %d\n",
2060 vif->type);
2061 return;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002062 }
2063
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002064 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
2065 if (iwl_mvm_has_new_tx_api(mvm))
2066 return;
2067
2068 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2069 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002070}
2071
2072/* Send the FW a request to remove the station from its internal data
2073 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02002074int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002075{
Johannes Berg013290a2014-08-04 13:38:48 +02002076 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002077 int ret;
2078
2079 lockdep_assert_held(&mvm->mutex);
2080
Liad Kaufmandf88c082016-11-24 15:31:00 +02002081 if (iwl_mvm_is_dqa_supported(mvm))
2082 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2083
Johannes Berg013290a2014-08-04 13:38:48 +02002084 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002085 if (ret)
2086 IWL_WARN(mvm, "Failed sending remove station\n");
2087 return ret;
2088}
2089
Johannes Berg013290a2014-08-04 13:38:48 +02002090int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2091{
2092 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Liad Kaufmande24f632015-08-04 15:19:18 +03002093 u32 qmask = 0;
Johannes Berg013290a2014-08-04 13:38:48 +02002094
2095 lockdep_assert_held(&mvm->mutex);
2096
Liad Kaufmandf88c082016-11-24 15:31:00 +02002097 if (!iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmande24f632015-08-04 15:19:18 +03002098 qmask = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002099
Liad Kaufmande24f632015-08-04 15:19:18 +03002100 /*
2101 * The firmware defines the TFD queue mask to only be relevant
2102 * for *unicast* queues, so the multicast (CAB) queue shouldn't
Liad Kaufmandf88c082016-11-24 15:31:00 +02002103 * be included. This only happens in NL80211_IFTYPE_AP vif type,
2104 * so the next line will only have an effect there.
Liad Kaufmande24f632015-08-04 15:19:18 +03002105 */
Johannes Berg013290a2014-08-04 13:38:48 +02002106 qmask &= ~BIT(vif->cab_queue);
Liad Kaufmande24f632015-08-04 15:19:18 +03002107 }
2108
Johannes Berg013290a2014-08-04 13:38:48 +02002109 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
Sara Sharonced19f22017-02-06 19:09:32 +02002110 ieee80211_vif_type_p2p(vif),
2111 IWL_STA_GENERAL_PURPOSE);
Johannes Berg013290a2014-08-04 13:38:48 +02002112}
2113
Johannes Berg8ca151b2013-01-24 14:25:36 +01002114/* Allocate a new station entry for the broadcast station to the given vif,
2115 * and send it to the FW.
2116 * Note that each P2P mac should have its own broadcast station.
2117 *
2118 * @mvm: the mvm component
2119 * @vif: the interface to which the broadcast station is added
2120 * The broadcast station to add is taken from the vif (mvmvif->bcast_sta). */
Johannes Berg013290a2014-08-04 13:38:48 +02002121int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002122{
2123 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002124 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002125 int ret;
2126
2127 lockdep_assert_held(&mvm->mutex);
2128
Johannes Berg013290a2014-08-04 13:38:48 +02002129 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002130 if (ret)
2131 return ret;
2132
Johannes Berg013290a2014-08-04 13:38:48 +02002133 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002134
2135 if (ret)
2136 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02002137
Johannes Berg8ca151b2013-01-24 14:25:36 +01002138 return ret;
2139}
2140
Johannes Berg013290a2014-08-04 13:38:48 +02002141void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2142{
2143 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2144
2145 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2146}
2147
Johannes Berg8ca151b2013-01-24 14:25:36 +01002148/*
2149 * Send the FW a request to remove the station from its internal data
2150 * structures, and in addition remove it from the local data structure.
2151 */
Johannes Berg013290a2014-08-04 13:38:48 +02002152int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002153{
2154 int ret;
2155
2156 lockdep_assert_held(&mvm->mutex);
2157
Johannes Berg013290a2014-08-04 13:38:48 +02002158 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002159
Johannes Berg013290a2014-08-04 13:38:48 +02002160 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2161
Johannes Berg8ca151b2013-01-24 14:25:36 +01002162 return ret;
2163}
2164
Sara Sharon26d6c162017-01-03 12:00:19 +02002165/*
2166 * Allocate a new station entry for the multicast station to the given vif,
2167 * and send it to the FW.
2168 * Note that each AP/GO mac should have its own multicast station.
2169 *
2170 * @mvm: the mvm component
2171 * @vif: the interface to which the multicast station is added
2172 */
2173int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2174{
2175 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2176 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2177 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2178 const u8 *maddr = _maddr;
2179 struct iwl_trans_txq_scd_cfg cfg = {
2180 .fifo = IWL_MVM_TX_FIFO_MCAST,
2181 .sta_id = msta->sta_id,
2182 .tid = IWL_MAX_TID_COUNT,
2183 .aggregate = false,
2184 .frame_limit = IWL_FRAME_LIMIT,
2185 };
2186 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2187 int ret;
2188
2189 lockdep_assert_held(&mvm->mutex);
2190
2191 if (!iwl_mvm_is_dqa_supported(mvm))
2192 return 0;
2193
Liad Kaufmanee48b722017-03-21 17:13:16 +02002194 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2195 vif->type != NL80211_IFTYPE_ADHOC))
Sara Sharon26d6c162017-01-03 12:00:19 +02002196 return -ENOTSUPP;
2197
Sara Sharonced19f22017-02-06 19:09:32 +02002198 /*
2199 * While in previous FWs we had to exclude cab queue from TFD queue
2200 * mask, now it is needed as any other queue.
2201 */
2202 if (!iwl_mvm_has_new_tx_api(mvm) &&
2203 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2204 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2205 &cfg, timeout);
2206 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2207 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002208 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2209 mvmvif->id, mvmvif->color);
2210 if (ret) {
2211 iwl_mvm_dealloc_int_sta(mvm, msta);
2212 return ret;
2213 }
2214
2215 /*
2216 * Enable cab queue after the ADD_STA command is sent.
2217 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
Sara Sharonced19f22017-02-06 19:09:32 +02002218 * command with unknown station id, and for FW that doesn't support
2219 * station API since the cab queue is not included in the
2220 * tfd_queue_mask.
Sara Sharon26d6c162017-01-03 12:00:19 +02002221 */
Sara Sharon310181e2017-01-17 14:27:48 +02002222 if (iwl_mvm_has_new_tx_api(mvm)) {
2223 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2224 msta->sta_id,
2225 IWL_MAX_TID_COUNT,
2226 timeout);
Sara Sharone2af3fa2017-02-22 19:35:10 +02002227 mvmvif->cab_queue = queue;
Sara Sharonced19f22017-02-06 19:09:32 +02002228 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2229 IWL_UCODE_TLV_API_STA_TYPE)) {
Liad Kaufmanee48b722017-03-21 17:13:16 +02002230 /*
2231 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2232 * invalid, so make sure we use the queue we want.
2233 * Note that this is done here as we want to avoid making DQA
2234 * changes in mac80211 layer.
2235 */
2236 if (vif->type == NL80211_IFTYPE_ADHOC) {
2237 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2238 mvmvif->cab_queue = vif->cab_queue;
2239 }
Sara Sharon310181e2017-01-17 14:27:48 +02002240 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2241 &cfg, timeout);
2242 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002243
2244 return 0;
2245}
2246
2247/*
2248 * Send the FW a request to remove the station from its internal data
2249 * structures, and in addition remove it from the local data structure.
2250 */
2251int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2252{
2253 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2254 int ret;
2255
2256 lockdep_assert_held(&mvm->mutex);
2257
2258 if (!iwl_mvm_is_dqa_supported(mvm))
2259 return 0;
2260
Sara Sharond49394a2017-03-05 13:01:08 +02002261 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2262
Sara Sharone2af3fa2017-02-22 19:35:10 +02002263 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
Sara Sharon26d6c162017-01-03 12:00:19 +02002264 IWL_MAX_TID_COUNT, 0);
2265
2266 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2267 if (ret)
2268 IWL_WARN(mvm, "Failed sending remove station\n");
2269
2270 return ret;
2271}
2272
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002273#define IWL_MAX_RX_BA_SESSIONS 16
2274
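/* Synchronously notify all RX queues that a BA session was torn down */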
Sara Sharonb915c102016-03-23 16:32:02 +02002275static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02002276{
Sara Sharonb915c102016-03-23 16:32:02 +02002277 struct iwl_mvm_delba_notif notif = {
2278 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2279 .metadata.sync = 1,
2280 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02002281 };
Sara Sharonb915c102016-03-23 16:32:02 +02002282 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2283};
Sara Sharon10b2b202016-03-20 16:23:41 +02002284
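/*
 * After syncing the RX queues, purge each per-queue reorder buffer of the
 * BA session and prevent its timeout timer from being re-armed.
 */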
Sara Sharonb915c102016-03-23 16:32:02 +02002285static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2286 struct iwl_mvm_baid_data *data)
2287{
2288 int i;
2289
2290 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2291
2292 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2293 int j;
2294 struct iwl_mvm_reorder_buffer *reorder_buf =
2295 &data->reorder_buf[i];
2296
Sara Sharon06904052016-02-28 20:28:17 +02002297 spin_lock_bh(&reorder_buf->lock);
2298 if (likely(!reorder_buf->num_stored)) {
2299 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02002300 continue;
Sara Sharon06904052016-02-28 20:28:17 +02002301 }
Sara Sharonb915c102016-03-23 16:32:02 +02002302
2303 /*
2304 * This shouldn't happen in regular DELBA since the internal
2305 * delBA notification should trigger a release of all frames in
2306 * the reorder buffer.
2307 */
2308 WARN_ON(1);
2309
2310 for (j = 0; j < reorder_buf->buf_size; j++)
2311 __skb_queue_purge(&reorder_buf->entries[j]);
Sara Sharon06904052016-02-28 20:28:17 +02002312 /*
2313		 * Prevent timer re-arm. This prevents a very far-fetched case
2314 * where we timed out on the notification. There may be prior
2315 * RX frames pending in the RX queue before the notification
2316 * that might get processed between now and the actual deletion
2317 * and we would re-arm the timer although we are deleting the
2318 * reorder buffer.
2319 */
2320 reorder_buf->removed = true;
2321 spin_unlock_bh(&reorder_buf->lock);
2322 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02002323 }
2324}
2325
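/*
 * Initialize the per-RX-queue reorder buffers for a new BA session: starting
 * SSN, window size, timeout timer, lock and the per-slot frame queues.
 */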
2326static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2327 u32 sta_id,
2328 struct iwl_mvm_baid_data *data,
2329 u16 ssn, u8 buf_size)
2330{
2331 int i;
2332
2333 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2334 struct iwl_mvm_reorder_buffer *reorder_buf =
2335 &data->reorder_buf[i];
2336 int j;
2337
2338 reorder_buf->num_stored = 0;
2339 reorder_buf->head_sn = ssn;
2340 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02002341 /* rx reorder timer */
2342 reorder_buf->reorder_timer.function =
2343 iwl_mvm_reorder_timer_expired;
2344 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
2345 init_timer(&reorder_buf->reorder_timer);
2346 spin_lock_init(&reorder_buf->lock);
2347 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02002348 reorder_buf->queue = i;
2349 reorder_buf->sta_id = sta_id;
Sara Sharon5d43eab2017-02-02 12:51:39 +02002350 reorder_buf->valid = false;
Sara Sharonb915c102016-03-23 16:32:02 +02002351 for (j = 0; j < reorder_buf->buf_size; j++)
2352 __skb_queue_head_init(&reorder_buf->entries[j]);
2353 }
Sara Sharon10b2b202016-03-20 16:23:41 +02002354}
2355
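/*
 * Start or stop an RX BA session for the given station and TID: update the
 * firmware through ADD_STA and, when the new RX API is used, set up or tear
 * down the RCU-protected BAID reorder data and its session timer.
 */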
Johannes Berg8ca151b2013-01-24 14:25:36 +01002356int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Sara Sharon10b2b202016-03-20 16:23:41 +02002357 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002358{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002359 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002360 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002361 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002362 int ret;
2363 u32 status;
2364
2365 lockdep_assert_held(&mvm->mutex);
2366
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002367 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2368 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2369 return -ENOSPC;
2370 }
2371
Sara Sharon10b2b202016-03-20 16:23:41 +02002372 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2373 /*
2374 * Allocate here so if allocation fails we can bail out early
2375 * before starting the BA session in the firmware
2376 */
Sara Sharonb915c102016-03-23 16:32:02 +02002377 baid_data = kzalloc(sizeof(*baid_data) +
2378 mvm->trans->num_rx_queues *
2379 sizeof(baid_data->reorder_buf[0]),
2380 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002381 if (!baid_data)
2382 return -ENOMEM;
2383 }
2384
Johannes Berg8ca151b2013-01-24 14:25:36 +01002385 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2386 cmd.sta_id = mvm_sta->sta_id;
2387 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002388 if (start) {
2389 cmd.add_immediate_ba_tid = (u8) tid;
2390 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Sara Sharon854c5702016-01-26 13:17:47 +02002391 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002392 } else {
2393 cmd.remove_immediate_ba_tid = (u8) tid;
2394 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002395 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2396 STA_MODIFY_REMOVE_BA_TID;
2397
2398 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002399 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2400 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002401 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002402 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002403 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002404
Sara Sharon837c4da2016-01-07 16:50:45 +02002405 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002406 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002407 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2408 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002409 break;
2410 case ADD_STA_IMMEDIATE_BA_FAILURE:
2411 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2412 ret = -ENOSPC;
2413 break;
2414 default:
2415 ret = -EIO;
2416 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2417 start ? "start" : "stopp", status);
2418 break;
2419 }
2420
Sara Sharon10b2b202016-03-20 16:23:41 +02002421 if (ret)
2422 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002423
Sara Sharon10b2b202016-03-20 16:23:41 +02002424 if (start) {
2425 u8 baid;
2426
2427 mvm->rx_ba_sessions++;
2428
2429 if (!iwl_mvm_has_new_rx_api(mvm))
2430 return 0;
2431
2432 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2433 ret = -EINVAL;
2434 goto out_free;
2435 }
2436 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2437 IWL_ADD_STA_BAID_SHIFT);
2438 baid_data->baid = baid;
2439 baid_data->timeout = timeout;
2440 baid_data->last_rx = jiffies;
Wei Yongjun72c240f2016-07-12 11:40:57 +00002441 setup_timer(&baid_data->session_timer,
2442 iwl_mvm_rx_agg_session_expired,
2443 (unsigned long)&mvm->baid_map[baid]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002444 baid_data->mvm = mvm;
2445 baid_data->tid = tid;
2446 baid_data->sta_id = mvm_sta->sta_id;
2447
2448 mvm_sta->tid_to_baid[tid] = baid;
2449 if (timeout)
2450 mod_timer(&baid_data->session_timer,
2451 TU_TO_EXP_TIME(timeout * 2));
2452
Sara Sharonb915c102016-03-23 16:32:02 +02002453 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2454 baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002455 /*
2456 * protect the BA data with RCU to cover a case where our
2457		 * internal RX sync mechanism times out (not that it's
2458 * supposed to happen) and we will free the session data while
2459 * RX is being processed in parallel
2460 */
Sara Sharon35263a02016-06-21 12:12:10 +03002461 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2462 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002463 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2464 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002465 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002466 u8 baid = mvm_sta->tid_to_baid[tid];
2467
Sara Sharon60dec522016-06-21 14:14:08 +03002468 if (mvm->rx_ba_sessions > 0)
2469 /* check that restart flow didn't zero the counter */
2470 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002471 if (!iwl_mvm_has_new_rx_api(mvm))
2472 return 0;
2473
2474 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2475 return -EINVAL;
2476
2477 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2478 if (WARN_ON(!baid_data))
2479 return -EINVAL;
2480
2481 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002482 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002483 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002484 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2485 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002486 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002487 }
2488 return 0;
2489
2490out_free:
2491 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002492 return ret;
2493}
2494
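/*
 * Update the firmware when TX aggregation starts or stops on a TID: adjust
 * the station's TFD queue mask and TID disable bitmap via ADD_STA.
 */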
Liad Kaufman9794c642015-08-19 17:34:28 +03002495int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2496 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002497{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002498 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002499 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002500 int ret;
2501 u32 status;
2502
2503 lockdep_assert_held(&mvm->mutex);
2504
2505 if (start) {
2506 mvm_sta->tfd_queue_msk |= BIT(queue);
2507 mvm_sta->tid_disable_agg &= ~BIT(tid);
2508 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002509 /* In DQA-mode the queue isn't removed on agg termination */
2510 if (!iwl_mvm_is_dqa_supported(mvm))
2511 mvm_sta->tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002512 mvm_sta->tid_disable_agg |= BIT(tid);
2513 }
2514
2515 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2516 cmd.sta_id = mvm_sta->sta_id;
2517 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002518 if (!iwl_mvm_has_new_tx_api(mvm))
2519 cmd.modify_mask = STA_MODIFY_QUEUES;
2520 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002521 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2522 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2523
2524 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002525 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2526 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002527 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002528 if (ret)
2529 return ret;
2530
Sara Sharon837c4da2016-01-07 16:50:45 +02002531 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002532 case ADD_STA_SUCCESS:
2533 break;
2534 default:
2535 ret = -EIO;
2536 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2537 start ? "start" : "stopp", status);
2538 break;
2539 }
2540
2541 return ret;
2542}
2543
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002544const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002545 IEEE80211_AC_BE,
2546 IEEE80211_AC_BK,
2547 IEEE80211_AC_BK,
2548 IEEE80211_AC_BE,
2549 IEEE80211_AC_VI,
2550 IEEE80211_AC_VI,
2551 IEEE80211_AC_VO,
2552 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002553 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002554};
2555
Johannes Berg3e56ead2013-02-15 22:23:18 +01002556static const u8 tid_to_ucode_ac[] = {
2557 AC_BE,
2558 AC_BK,
2559 AC_BK,
2560 AC_BE,
2561 AC_VI,
2562 AC_VI,
2563 AC_VO,
2564 AC_VO,
2565};
2566
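/*
 * Prepare TX aggregation on a TID: pick or reserve the TX queue to use and
 * either move straight to IWL_AGG_STARTING or wait for the queue to drain up
 * to the current SSN first.
 */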
Johannes Berg8ca151b2013-01-24 14:25:36 +01002567int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2568 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2569{
Johannes Berg5b577a92013-11-14 18:20:04 +01002570 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002571 struct iwl_mvm_tid_data *tid_data;
Liad Kaufmandd321622017-04-05 16:25:11 +03002572 u16 normalized_ssn;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002573 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002574 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002575
2576 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2577 return -EINVAL;
2578
2579 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2580 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
2581 mvmsta->tid_data[tid].state);
2582 return -ENXIO;
2583 }
2584
2585 lockdep_assert_held(&mvm->mutex);
2586
Arik Nemtsovb2492502014-03-13 12:21:50 +02002587 spin_lock_bh(&mvmsta->lock);
2588
2589 /* possible race condition - we entered D0i3 while starting agg */
2590 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2591 spin_unlock_bh(&mvmsta->lock);
2592 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2593 return -EIO;
2594 }
2595
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002596 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002597
Liad Kaufmancf961e12015-08-13 19:16:08 +03002598 /*
2599 * Note the possible cases:
2600 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2601 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2602 * one and mark it as reserved
2603 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
2604 * non-DQA mode, since the TXQ hasn't yet been allocated
Sara Sharon34e10862017-02-23 13:15:07 +02002605 * Don't support case 3 for new TX path as it is not expected to happen
2606 * and aggregation will be offloaded soon anyway
Liad Kaufmancf961e12015-08-13 19:16:08 +03002607 */
2608 txq_id = mvmsta->tid_data[tid].txq_id;
Sara Sharon34e10862017-02-23 13:15:07 +02002609 if (iwl_mvm_has_new_tx_api(mvm)) {
2610 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2611 ret = -ENXIO;
2612 goto release_locks;
2613 }
2614 } else if (iwl_mvm_is_dqa_supported(mvm) &&
2615 unlikely(mvm->queue_info[txq_id].status ==
2616 IWL_MVM_QUEUE_SHARED)) {
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002617 ret = -ENXIO;
2618 IWL_DEBUG_TX_QUEUES(mvm,
2619 "Can't start tid %d agg on shared queue!\n",
2620 tid);
2621 goto release_locks;
2622 } else if (!iwl_mvm_is_dqa_supported(mvm) ||
Liad Kaufmancf961e12015-08-13 19:16:08 +03002623 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002624 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2625 mvm->first_agg_queue,
Liad Kaufmancf961e12015-08-13 19:16:08 +03002626 mvm->last_agg_queue);
2627 if (txq_id < 0) {
2628 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002629 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2630 goto release_locks;
2631 }
Sara Sharon01796ff2016-11-16 17:04:36 +02002632 /*
2633 * TXQ shouldn't be in inactive mode for non-DQA, so getting
2634 * an inactive queue from iwl_mvm_find_free_queue() is
2635 * certainly a bug
2636 */
2637 WARN_ON(mvm->queue_info[txq_id].status ==
2638 IWL_MVM_QUEUE_INACTIVE);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002639
2640 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2641 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002642 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002643
2644 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002645
Liad Kaufmancf961e12015-08-13 19:16:08 +03002646 IWL_DEBUG_TX_QUEUES(mvm,
2647 "AGG for tid %d will be on queue #%d\n",
2648 tid, txq_id);
2649
Johannes Berg8ca151b2013-01-24 14:25:36 +01002650 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002651 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002652 tid_data->txq_id = txq_id;
2653 *ssn = tid_data->ssn;
2654
2655 IWL_DEBUG_TX_QUEUES(mvm,
2656 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2657 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2658 tid_data->next_reclaimed);
2659
Liad Kaufmandd321622017-04-05 16:25:11 +03002660 /*
2661	 * In A000 HW, the next_reclaimed index is only 8 bits, so we'll need
2662 * to align the wrap around of ssn so we compare relevant values.
2663 */
2664 normalized_ssn = tid_data->ssn;
2665 if (mvm->trans->cfg->gen2)
2666 normalized_ssn &= 0xff;
2667
2668 if (normalized_ssn == tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002669 tid_data->state = IWL_AGG_STARTING;
2670 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2671 } else {
2672 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2673 }
2674
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002675 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002676 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002677
2678release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002679 spin_unlock(&mvm->queue_info_lock);
2680out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002681 spin_unlock_bh(&mvmsta->lock);
2682
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002683 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002684}
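
/*
 * Illustrative sketch, not part of the driver: how the 8-bit
 * normalization in iwl_mvm_sta_tx_agg_start() makes the comparison
 * against next_reclaimed work on gen2 (A000) HW, where next_reclaimed
 * wraps at 256 while the mac80211 SSN is 12 bits.  Values are made up.
 *
 *	u16 ssn = 0x142;		// 12-bit sequence number
 *	u16 next_reclaimed = 0x42;	// 8-bit index, already wrapped
 *	u16 normalized_ssn = ssn;
 *
 *	if (mvm->trans->cfg->gen2)
 *		normalized_ssn &= 0xff;	// 0x142 -> 0x42
 *
 *	// normalized_ssn == next_reclaimed, so the HW queue is considered
 *	// drained and the session moves straight to IWL_AGG_STARTING.
 */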
2685
2686int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002687 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2688 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002689{
Johannes Berg5b577a92013-11-14 18:20:04 +01002690 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002691 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002692 unsigned int wdg_timeout =
2693 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002694 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002695 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002696 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002697 u16 ssn;
2698
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002699 struct iwl_trans_txq_scd_cfg cfg = {
2700 .sta_id = mvmsta->sta_id,
2701 .tid = tid,
2702 .frame_limit = buf_size,
2703 .aggregate = true,
2704 };
2705
Eyal Shapiraefed6642014-09-14 15:58:53 +03002706 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2707 != IWL_MAX_TID_COUNT);
2708
Liad Kaufmana58bb462017-05-28 14:20:04 +03002709 if (!mvm->trans->cfg->gen2)
2710 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2711 else
2712 buf_size = min_t(int, buf_size,
2713 LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002714
2715 spin_lock_bh(&mvmsta->lock);
2716 ssn = tid_data->ssn;
2717 queue = tid_data->txq_id;
2718 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002719 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002720 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002721 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002722 spin_unlock_bh(&mvmsta->lock);
2723
Sara Sharon34e10862017-02-23 13:15:07 +02002724 if (iwl_mvm_has_new_tx_api(mvm)) {
2725 /*
2726	 * If there were no queue, iwl_mvm_sta_tx_agg_start() would have
2727	 * failed, so there is no need to check the queue's status here
2728 */
2729 if (buf_size < mvmsta->max_agg_bufsize)
2730 return -ENOTSUPP;
2731
2732 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2733 if (ret)
2734 return -EIO;
2735 goto out;
2736 }
2737
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002738 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01002739
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002740 spin_lock_bh(&mvm->queue_info_lock);
2741 queue_status = mvm->queue_info[queue].status;
2742 spin_unlock_bh(&mvm->queue_info_lock);
2743
Liad Kaufmancf961e12015-08-13 19:16:08 +03002744 /* In DQA mode, the existing queue might need to be reconfigured */
2745 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002746 /* Maybe there is no need to even alloc a queue... */
2747 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2748 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002749
2750 /*
2751	 * Only reconfigure the SCD for the queue if the window size has
2752	 * become smaller than the current one
2753 */
2754 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2755 /*
2756 * If reconfiguring an existing queue, it first must be
2757 * drained
2758 */
Sara Sharona1a57872017-03-05 11:38:58 +02002759 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2760 BIT(queue));
Liad Kaufmancf961e12015-08-13 19:16:08 +03002761 if (ret) {
2762 IWL_ERR(mvm,
2763 "Error draining queue before reconfig\n");
2764 return ret;
2765 }
2766
2767 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2768 mvmsta->sta_id, tid,
2769 buf_size, ssn);
2770 if (ret) {
2771 IWL_ERR(mvm,
2772 "Error reconfiguring TXQ #%d\n", queue);
2773 return ret;
2774 }
2775 }
2776 }
2777
2778 if (alloc_queue)
2779 iwl_mvm_enable_txq(mvm, queue,
2780 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2781 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03002782
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002783 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2784 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2785 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2786 if (ret)
2787 return -EIO;
2788 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002789
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002790 /* No need to mark as reserved */
2791 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002792 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002793 spin_unlock_bh(&mvm->queue_info_lock);
2794
Sara Sharon34e10862017-02-23 13:15:07 +02002795out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002796 /*
2797 * Even though in theory the peer could have different
2798 * aggregation reorder buffer sizes for different sessions,
2799 * our ucode doesn't allow for that and has a global limit
2800 * for each station. Therefore, use the minimum of all the
2801 * aggregation sessions and our default value.
2802 */
2803 mvmsta->max_agg_bufsize =
2804 min(mvmsta->max_agg_bufsize, buf_size);
2805 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2806
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03002807 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2808 sta->addr, tid);
2809
Eyal Shapira9e680942013-11-09 00:16:16 +02002810 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002811}
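
/*
 * Illustrative sketch, with made-up values, of the buffer-size handling
 * in iwl_mvm_sta_tx_agg_oper(): the ADDBA buffer size requested by the
 * peer is first clamped to the rate-scaling limit of the HW generation,
 * and the value programmed for rate scaling is the minimum over all
 * aggregation sessions, since the ucode keeps a single limit per
 * station.
 *
 *	buf_size = 128;				// peer's request
 *	buf_size = min_t(int, buf_size,
 *			 LINK_QUAL_AGG_FRAME_LIMIT_DEF);
 *	mvmsta->max_agg_bufsize =
 *		min(mvmsta->max_agg_bufsize, buf_size);
 *	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
 */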
2812
Sara Sharon34e10862017-02-23 13:15:07 +02002813static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
2814 struct iwl_mvm_sta *mvmsta,
2815 u16 txq_id)
2816{
2817 if (iwl_mvm_has_new_tx_api(mvm))
2818 return;
2819
2820 spin_lock_bh(&mvm->queue_info_lock);
2821 /*
2822	 * The TXQ is marked as reserved only if no traffic came through yet.
2823	 * This means no traffic has been sent on this TID (aggregated or not), so
2824	 * we no longer have use for the queue. It hasn't even been
2825	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
2826	 * free.
2827 */
2828 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2829 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2830
2831 spin_unlock_bh(&mvm->queue_info_lock);
2832}
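
/*
 * Simplified sketch of the queue status transitions driven by the
 * aggregation code in this file (other parts of the driver also touch
 * these states):
 *
 *	IWL_MVM_QUEUE_FREE
 *	  -> IWL_MVM_QUEUE_RESERVED	iwl_mvm_sta_tx_agg_start(): queue
 *					picked but not yet enabled
 *	  -> IWL_MVM_QUEUE_READY	iwl_mvm_sta_tx_agg_oper()
 *	  -> IWL_MVM_QUEUE_FREE		iwl_mvm_unreserve_agg_queue() when
 *					no traffic ever used the queue
 *
 * IWL_MVM_QUEUE_SHARED and IWL_MVM_QUEUE_INACTIVE are managed
 * elsewhere; aggregation start is simply refused on a shared queue.
 */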
2833
Johannes Berg8ca151b2013-01-24 14:25:36 +01002834int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2835 struct ieee80211_sta *sta, u16 tid)
2836{
Johannes Berg5b577a92013-11-14 18:20:04 +01002837 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002838 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2839 u16 txq_id;
2840 int err;
2841
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02002842 /*
2843 * If mac80211 is cleaning its state, then say that we finished since
2844 * our state has been cleared anyway.
2845 */
2846 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2847 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2848 return 0;
2849 }
2850
Johannes Berg8ca151b2013-01-24 14:25:36 +01002851 spin_lock_bh(&mvmsta->lock);
2852
2853 txq_id = tid_data->txq_id;
2854
2855 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2856 mvmsta->sta_id, tid, txq_id, tid_data->state);
2857
Eyal Shapiraefed6642014-09-14 15:58:53 +03002858 mvmsta->agg_tids &= ~BIT(tid);
2859
Sara Sharon34e10862017-02-23 13:15:07 +02002860 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002861
Johannes Berg8ca151b2013-01-24 14:25:36 +01002862 switch (tid_data->state) {
2863 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01002864 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002865
2866 IWL_DEBUG_TX_QUEUES(mvm,
2867 "ssn = %d, next_recl = %d\n",
2868 tid_data->ssn, tid_data->next_reclaimed);
2869
Liad Kaufman664e9682017-04-05 10:35:18 +03002870 /*
2871 * There are still packets for this RA / TID in the HW.
2872 * Not relevant for DQA mode, since there is no need to disable
2873 * the queue.
2874 */
2875 if (!iwl_mvm_is_dqa_supported(mvm) &&
2876 tid_data->ssn != tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002877 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
2878 err = 0;
2879 break;
2880 }
2881
2882 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002883 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002884 spin_unlock_bh(&mvmsta->lock);
2885
2886 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2887
2888 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2889
Liad Kaufmancf961e12015-08-13 19:16:08 +03002890 if (!iwl_mvm_is_dqa_supported(mvm)) {
2891 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2892
2893 iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
2894 }
Johannes Bergf7f89e72014-08-05 15:24:44 +02002895 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002896 case IWL_AGG_STARTING:
2897 case IWL_EMPTYING_HW_QUEUE_ADDBA:
2898 /*
2899 * The agg session has been stopped before it was set up. This
2900 * can happen when the AddBA timer times out for example.
2901 */
2902
2903 /* No barriers since we are under mutex */
2904 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002905
2906 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2907 tid_data->state = IWL_AGG_OFF;
2908 err = 0;
2909 break;
2910 default:
2911 IWL_ERR(mvm,
2912 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2913 mvmsta->sta_id, tid, tid_data->state);
2914 IWL_ERR(mvm,
2915 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2916 err = -EINVAL;
2917 }
2918
2919 spin_unlock_bh(&mvmsta->lock);
2920
2921 return err;
2922}
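
/*
 * Condensed, descriptive view of the stop flow above: if the session
 * was fully running (IWL_AGG_ON) and, in non-DQA mode, frames are still
 * pending in the HW queue (ssn != next_reclaimed), the state moves to
 * IWL_EMPTYING_HW_QUEUE_DELBA and the teardown is deferred until the
 * queue drains; otherwise the BA session is torn down immediately and,
 * for non-DQA, the TXQ is disabled.  A session stopped before it was
 * set up (IWL_AGG_STARTING / IWL_EMPTYING_HW_QUEUE_ADDBA) is simply
 * switched back to IWL_AGG_OFF.
 */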
2923
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002924int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2925 struct ieee80211_sta *sta, u16 tid)
2926{
Johannes Berg5b577a92013-11-14 18:20:04 +01002927 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002928 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2929 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02002930 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002931
2932 /*
2933 * First set the agg state to OFF to avoid calling
2934 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2935 */
2936 spin_lock_bh(&mvmsta->lock);
2937 txq_id = tid_data->txq_id;
2938 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2939 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02002940 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002941 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002942 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002943 spin_unlock_bh(&mvmsta->lock);
2944
Sara Sharon34e10862017-02-23 13:15:07 +02002945 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002946
Johannes Bergb6658ff2013-07-24 13:55:51 +02002947 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002948 iwl_mvm_drain_sta(mvm, mvmsta, true);
Sara Sharond6d517b2017-03-06 10:16:11 +02002949
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002950 if (iwl_mvm_has_new_tx_api(mvm)) {
2951 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
2952 BIT(tid), 0))
2953 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02002954 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002955 } else {
2956 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
2957 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02002958 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002959 }
Sara Sharond6d517b2017-03-06 10:16:11 +02002960
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002961 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002962
Johannes Bergf7f89e72014-08-05 15:24:44 +02002963 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2964
Liad Kaufmancf961e12015-08-13 19:16:08 +03002965 if (!iwl_mvm_is_dqa_supported(mvm)) {
2966 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2967
2968 iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
2969 tid, 0);
2970 }
Johannes Bergb6658ff2013-07-24 13:55:51 +02002971 }
2972
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002973 return 0;
2974}
2975
Johannes Berg8ca151b2013-01-24 14:25:36 +01002976static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2977{
Johannes Berg2dc2a152015-06-16 17:09:18 +02002978 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002979
2980 lockdep_assert_held(&mvm->mutex);
2981
Johannes Berg2dc2a152015-06-16 17:09:18 +02002982 /* Pick the unused key offset with the highest 'deleted'
2983 * counter. Every time a key is deleted, all the counters
2984 * are incremented and the one that was just deleted is
2985 * reset to zero. Thus, the highest counter is the one
2986 * that was deleted longest ago. Pick that one.
2987 */
2988 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2989 if (test_bit(i, mvm->fw_key_table))
2990 continue;
2991 if (mvm->fw_key_deleted[i] > max) {
2992 max = mvm->fw_key_deleted[i];
2993 max_offs = i;
2994 }
2995 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002996
Johannes Berg2dc2a152015-06-16 17:09:18 +02002997 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002998 return STA_KEY_IDX_INVALID;
2999
Johannes Berg2dc2a152015-06-16 17:09:18 +02003000 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003001}
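
/*
 * Worked example, with made-up numbers, of the "deleted longest ago"
 * selection above, combined with the counter aging done in
 * iwl_mvm_remove_sta_key(): every removal increments all counters and
 * zeroes the slot that was just freed, so the unused slot with the
 * largest counter is the one that has been free the longest.
 *
 *	fw_key_deleted[] = { 3, 0, 7, 1 };	// slots 0..3
 *	fw_key_table	 = 0b0010;		// only slot 1 still in use
 *
 *	// Slots 0, 2 and 3 are free; slot 2 has the highest counter (7),
 *	// so iwl_mvm_set_fw_key_idx() returns 2.
 */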
3002
Johannes Berg5f7a1842015-12-11 09:36:10 +01003003static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3004 struct ieee80211_vif *vif,
3005 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003006{
Johannes Berg5b530e92014-12-23 16:00:17 +01003007 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003008
Johannes Berg5f7a1842015-12-11 09:36:10 +01003009 if (sta)
3010 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003011
3012 /*
3013 * The device expects GTKs for station interfaces to be
3014 * installed as GTKs for the AP station. If we have no
3015 * station ID, then use AP's station ID.
3016 */
3017 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003018 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Avri Altman9513c5e2015-10-19 16:29:11 +02003019 u8 sta_id = mvmvif->ap_sta_id;
3020
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003021 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3022 lockdep_is_held(&mvm->mutex));
3023
Avri Altman9513c5e2015-10-19 16:29:11 +02003024 /*
3025 * It is possible that the 'sta' parameter is NULL,
3026 * for example when a GTK is removed - the sta_id will then
3027 * be the AP ID, and no station was passed by mac80211.
3028 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003029 if (IS_ERR_OR_NULL(sta))
3030 return NULL;
3031
3032 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02003033 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003034
Johannes Berg5f7a1842015-12-11 09:36:10 +01003035 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003036}
3037
3038static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
David Spinadel85aeb582017-03-30 19:43:53 +03003039 u32 sta_id,
Sara Sharon45c458b2016-11-09 15:43:26 +02003040 struct ieee80211_key_conf *key, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003041 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3042 u8 key_offset)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003043{
Sara Sharon45c458b2016-11-09 15:43:26 +02003044 union {
3045 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3046 struct iwl_mvm_add_sta_key_cmd cmd;
3047 } u = {};
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003048 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01003049 int ret;
3050 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003051 u16 keyidx;
Sara Sharon45c458b2016-11-09 15:43:26 +02003052 u64 pn = 0;
3053 int i, size;
3054 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3055 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003056
David Spinadel85aeb582017-03-30 19:43:53 +03003057 if (sta_id == IWL_MVM_INVALID_STA)
3058 return -EINVAL;
3059
Sara Sharon45c458b2016-11-09 15:43:26 +02003060 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
Johannes Berg8ca151b2013-01-24 14:25:36 +01003061 STA_KEY_FLG_KEYID_MSK;
3062 key_flags = cpu_to_le16(keyidx);
3063 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3064
Sara Sharon45c458b2016-11-09 15:43:26 +02003065 switch (key->cipher) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003066 case WLAN_CIPHER_SUITE_TKIP:
3067 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003068 if (new_api) {
3069 memcpy((void *)&u.cmd.tx_mic_key,
3070 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3071 IWL_MIC_KEY_SIZE);
3072
3073 memcpy((void *)&u.cmd.rx_mic_key,
3074 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3075 IWL_MIC_KEY_SIZE);
3076 pn = atomic64_read(&key->tx_pn);
3077
3078 } else {
3079 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3080 for (i = 0; i < 5; i++)
3081 u.cmd_v1.tkip_rx_ttak[i] =
3082 cpu_to_le16(tkip_p1k[i]);
3083 }
3084 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003085 break;
3086 case WLAN_CIPHER_SUITE_CCMP:
3087 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Sara Sharon45c458b2016-11-09 15:43:26 +02003088 memcpy(u.cmd.common.key, key->key, key->keylen);
3089 if (new_api)
3090 pn = atomic64_read(&key->tx_pn);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003091 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01003092 case WLAN_CIPHER_SUITE_WEP104:
3093 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05003094 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01003095 case WLAN_CIPHER_SUITE_WEP40:
3096 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003097 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
Johannes Bergba3943b2014-11-12 23:54:48 +01003098 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03003099 case WLAN_CIPHER_SUITE_GCMP_256:
3100 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3101 /* fall through */
3102 case WLAN_CIPHER_SUITE_GCMP:
3103 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003104 memcpy(u.cmd.common.key, key->key, key->keylen);
3105 if (new_api)
3106 pn = atomic64_read(&key->tx_pn);
Ayala Beker2a53d162016-04-07 16:21:57 +03003107 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003108 default:
Max Stepanove36e5432013-08-27 19:56:13 +03003109 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
Sara Sharon45c458b2016-11-09 15:43:26 +02003110 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003111 }
3112
Johannes Bergba3943b2014-11-12 23:54:48 +01003113 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003114 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3115
Sara Sharon45c458b2016-11-09 15:43:26 +02003116 u.cmd.common.key_offset = key_offset;
3117 u.cmd.common.key_flags = key_flags;
David Spinadel85aeb582017-03-30 19:43:53 +03003118 u.cmd.common.sta_id = sta_id;
Sara Sharon45c458b2016-11-09 15:43:26 +02003119
3120 if (new_api) {
3121 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3122 size = sizeof(u.cmd);
3123 } else {
3124 size = sizeof(u.cmd_v1);
3125 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003126
3127 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003128 if (cmd_flags & CMD_ASYNC)
Sara Sharon45c458b2016-11-09 15:43:26 +02003129 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3130 &u.cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003131 else
Sara Sharon45c458b2016-11-09 15:43:26 +02003132 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3133 &u.cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003134
3135 switch (status) {
3136 case ADD_STA_SUCCESS:
3137 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3138 break;
3139 default:
3140 ret = -EIO;
3141 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3142 break;
3143 }
3144
3145 return ret;
3146}
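
/*
 * Minimal sketch of the union trick used above (field names simplified,
 * not the real firmware structures): both command versions start with
 * the same common header, so one buffer can be filled through the new
 * layout and then sent with whichever length the firmware expects.
 *
 *	union {
 *		struct { struct common_hdr common; } cmd_v1;
 *		struct { struct common_hdr common; __le64 pn; } cmd;
 *	} u = {};
 *
 *	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
 *					  &u.cmd, &status);
 */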
3147
3148static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3149 struct ieee80211_key_conf *keyconf,
3150 u8 sta_id, bool remove_key)
3151{
3152 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3153
3154 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03003155 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3156 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3157 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3158 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3159 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3160 return -EINVAL;
3161
3162 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3163 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01003164 return -EINVAL;
3165
3166 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3167 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3168
3169 if (remove_key) {
3170 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3171 } else {
3172 struct ieee80211_key_seq seq;
3173 const u8 *pn;
3174
Ayala Bekeraa950522016-06-01 00:28:09 +03003175 switch (keyconf->cipher) {
3176 case WLAN_CIPHER_SUITE_AES_CMAC:
3177 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3178 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03003179 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3180 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3181 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3182 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03003183 default:
3184 return -EINVAL;
3185 }
3186
Ayala Beker8e160ab2016-04-11 11:37:38 +03003187 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3188 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3189 igtk_cmd.ctrl_flags |=
3190 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003191 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3192 pn = seq.aes_cmac.pn;
3193 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3194 ((u64) pn[4] << 8) |
3195 ((u64) pn[3] << 16) |
3196 ((u64) pn[2] << 24) |
3197 ((u64) pn[1] << 32) |
3198 ((u64) pn[0] << 40));
3199 }
3200
3201 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3202 remove_key ? "removing" : "installing",
3203 igtk_cmd.sta_id);
3204
Ayala Beker8e160ab2016-04-11 11:37:38 +03003205 if (!iwl_mvm_has_new_rx_api(mvm)) {
3206 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3207 .ctrl_flags = igtk_cmd.ctrl_flags,
3208 .key_id = igtk_cmd.key_id,
3209 .sta_id = igtk_cmd.sta_id,
3210 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3211 };
3212
3213 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3214 ARRAY_SIZE(igtk_cmd_v1.igtk));
3215 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3216 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3217 }
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003218 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003219 sizeof(igtk_cmd), &igtk_cmd);
3220}
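
/*
 * Illustrative example, with a made-up packet number, of the
 * receive_seq_cnt packing above: mac80211 stores the 6-byte BIP PN with
 * pn[0] as the most significant byte, so the driver reassembles it into
 * a native 64-bit value before converting it to little endian for the
 * firmware.
 *
 *	u8 pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };
 *
 *	u64 cnt = ((u64)pn[5] << 0)  | ((u64)pn[4] << 8)  |
 *		  ((u64)pn[3] << 16) | ((u64)pn[2] << 24) |
 *		  ((u64)pn[1] << 32) | ((u64)pn[0] << 40);
 *
 *	// cnt == 0x0102, i.e. a replay counter of 258.
 */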
3221
3222
3223static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3224 struct ieee80211_vif *vif,
3225 struct ieee80211_sta *sta)
3226{
Johannes Berg5b530e92014-12-23 16:00:17 +01003227 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003228
3229 if (sta)
3230 return sta->addr;
3231
3232 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003233 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003234 u8 sta_id = mvmvif->ap_sta_id;
3235 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3236 lockdep_is_held(&mvm->mutex));
3237 return sta->addr;
3238 }
3239
3240
3241 return NULL;
3242}
3243
Johannes Berg2f6319d2014-11-12 23:39:56 +01003244static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3245 struct ieee80211_vif *vif,
3246 struct ieee80211_sta *sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01003247 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003248 u8 key_offset,
Johannes Bergba3943b2014-11-12 23:54:48 +01003249 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003250{
Johannes Berg8ca151b2013-01-24 14:25:36 +01003251 int ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003252 const u8 *addr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003253 struct ieee80211_key_seq seq;
3254 u16 p1k[5];
David Spinadel85aeb582017-03-30 19:43:53 +03003255 u32 sta_id;
3256
3257 if (sta) {
3258 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3259
3260 sta_id = mvm_sta->sta_id;
3261 } else if (vif->type == NL80211_IFTYPE_AP &&
3262 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3263 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3264
3265 sta_id = mvmvif->mcast_sta.sta_id;
3266 } else {
3267 IWL_ERR(mvm, "Failed to find station id\n");
3268 return -EINVAL;
3269 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003270
Johannes Berg8ca151b2013-01-24 14:25:36 +01003271 switch (keyconf->cipher) {
3272 case WLAN_CIPHER_SUITE_TKIP:
David Spinadel85aeb582017-03-30 19:43:53 +03003273 if (vif->type == NL80211_IFTYPE_AP) {
3274 ret = -EINVAL;
3275 break;
3276 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003277 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3278 /* get phase 1 key from mac80211 */
3279 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3280 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
David Spinadel85aeb582017-03-30 19:43:53 +03003281 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003282 seq.tkip.iv32, p1k, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003283 break;
3284 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergba3943b2014-11-12 23:54:48 +01003285 case WLAN_CIPHER_SUITE_WEP40:
3286 case WLAN_CIPHER_SUITE_WEP104:
Ayala Beker2a53d162016-04-07 16:21:57 +03003287 case WLAN_CIPHER_SUITE_GCMP:
3288 case WLAN_CIPHER_SUITE_GCMP_256:
David Spinadel85aeb582017-03-30 19:43:53 +03003289 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003290 0, NULL, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003291 break;
3292 default:
David Spinadel85aeb582017-03-30 19:43:53 +03003293 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003294 0, NULL, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003295 }
3296
Johannes Berg8ca151b2013-01-24 14:25:36 +01003297 return ret;
3298}
3299
Johannes Berg2f6319d2014-11-12 23:39:56 +01003300static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
Johannes Bergba3943b2014-11-12 23:54:48 +01003301 struct ieee80211_key_conf *keyconf,
3302 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003303{
Sara Sharon45c458b2016-11-09 15:43:26 +02003304 union {
3305 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3306 struct iwl_mvm_add_sta_key_cmd cmd;
3307 } u = {};
3308 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3309 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003310 __le16 key_flags;
Sara Sharon45c458b2016-11-09 15:43:26 +02003311 int ret, size;
Johannes Berg79920742014-11-03 15:43:04 +01003312 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003313
David Spinadel85aeb582017-03-30 19:43:53 +03003314 if (sta_id == IWL_MVM_INVALID_STA)
3315 return -EINVAL;
3316
Emmanuel Grumbach8115efb2013-02-05 10:08:35 +02003317 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3318 STA_KEY_FLG_KEYID_MSK);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003319 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3320 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3321
Johannes Bergba3943b2014-11-12 23:54:48 +01003322 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003323 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3324
Sara Sharon45c458b2016-11-09 15:43:26 +02003325 /*
3326 * The fields assigned here are in the same location at the start
3327 * of the command, so we can do this union trick.
3328 */
3329 u.cmd.common.key_flags = key_flags;
3330 u.cmd.common.key_offset = keyconf->hw_key_idx;
3331 u.cmd.common.sta_id = sta_id;
3332
3333 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003334
Johannes Berg8ca151b2013-01-24 14:25:36 +01003335 status = ADD_STA_SUCCESS;
Sara Sharon45c458b2016-11-09 15:43:26 +02003336 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3337 &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003338
3339 switch (status) {
3340 case ADD_STA_SUCCESS:
3341 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3342 break;
3343 default:
3344 ret = -EIO;
3345 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3346 break;
3347 }
3348
3349 return ret;
3350}
3351
Johannes Berg2f6319d2014-11-12 23:39:56 +01003352int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3353 struct ieee80211_vif *vif,
3354 struct ieee80211_sta *sta,
3355 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003356 u8 key_offset)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003357{
Johannes Bergba3943b2014-11-12 23:54:48 +01003358 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003359 struct iwl_mvm_sta *mvm_sta;
David Spinadel85aeb582017-03-30 19:43:53 +03003360 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003361 int ret;
Matti Gottlieb11828db2015-06-01 15:15:11 +03003362 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
Johannes Berg2f6319d2014-11-12 23:39:56 +01003363
3364 lockdep_assert_held(&mvm->mutex);
3365
David Spinadel85aeb582017-03-30 19:43:53 +03003366 if (vif->type != NL80211_IFTYPE_AP ||
3367 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3368 /* Get the station id from the mvm local station table */
3369 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3370 if (!mvm_sta) {
3371 IWL_ERR(mvm, "Failed to find station\n");
Johannes Berg2f6319d2014-11-12 23:39:56 +01003372 return -EINVAL;
3373 }
David Spinadel85aeb582017-03-30 19:43:53 +03003374 sta_id = mvm_sta->sta_id;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003375
David Spinadel85aeb582017-03-30 19:43:53 +03003376 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3377 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3378 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3379 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
3380 false);
3381 goto end;
3382 }
3383
3384 /*
3385 * It is possible that the 'sta' parameter is NULL, and thus
3386 * there is a need to retrieve the sta from the local station
3387 * table.
3388 */
3389 if (!sta) {
3390 sta = rcu_dereference_protected(
3391 mvm->fw_id_to_mac_id[sta_id],
3392 lockdep_is_held(&mvm->mutex));
3393 if (IS_ERR_OR_NULL(sta)) {
3394 IWL_ERR(mvm, "Invalid station id\n");
3395 return -EINVAL;
3396 }
3397 }
3398
3399 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3400 return -EINVAL;
3401 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003402
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003403 /* If the key_offset is not pre-assigned, we need to find a
3404 * new offset to use. In normal cases, the offset is not
3405 * pre-assigned, but during HW_RESTART we want to reuse the
3406 * same indices, so we pass them when this function is called.
3407 *
3408	 * In D3 entry, we need to hardcode the indices (because the
3409 * firmware hardcodes the PTK offset to 0). In this case, we
3410 * need to make sure we don't overwrite the hw_key_idx in the
3411 * keyconf structure, because otherwise we cannot configure
3412 * the original ones back when resuming.
3413 */
3414 if (key_offset == STA_KEY_IDX_INVALID) {
3415 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3416 if (key_offset == STA_KEY_IDX_INVALID)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003417 return -ENOSPC;
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003418 keyconf->hw_key_idx = key_offset;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003419 }
3420
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003421 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003422 if (ret)
Johannes Bergba3943b2014-11-12 23:54:48 +01003423 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003424
3425 /*
3426 * For WEP, the same key is used for multicast and unicast. Upload it
3427 * again, using the same key offset, and now pointing the other one
3428 * to the same key slot (offset).
3429 * If this fails, remove the original as well.
3430 */
David Spinadel85aeb582017-03-30 19:43:53 +03003431 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3432 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3433 sta) {
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003434 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3435 key_offset, !mcast);
Johannes Bergba3943b2014-11-12 23:54:48 +01003436 if (ret) {
Johannes Bergba3943b2014-11-12 23:54:48 +01003437 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003438 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003439 }
3440 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003441
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003442 __set_bit(key_offset, mvm->fw_key_table);
3443
Johannes Berg2f6319d2014-11-12 23:39:56 +01003444end:
3445 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3446 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
Matti Gottlieb11828db2015-06-01 15:15:11 +03003447 sta ? sta->addr : zero_addr, ret);
Johannes Berg2f6319d2014-11-12 23:39:56 +01003448 return ret;
3449}
3450
3451int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3452 struct ieee80211_vif *vif,
3453 struct ieee80211_sta *sta,
3454 struct ieee80211_key_conf *keyconf)
3455{
Johannes Bergba3943b2014-11-12 23:54:48 +01003456 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003457 struct iwl_mvm_sta *mvm_sta;
Sara Sharon0ae98812017-01-04 14:53:58 +02003458 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2dc2a152015-06-16 17:09:18 +02003459 int ret, i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003460
3461 lockdep_assert_held(&mvm->mutex);
3462
Johannes Berg5f7a1842015-12-11 09:36:10 +01003463 /* Get the station from the mvm local station table */
3464 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
Luca Coelho71793b7d2017-03-30 12:04:47 +03003465 if (mvm_sta)
3466 sta_id = mvm_sta->sta_id;
David Spinadel85aeb582017-03-30 19:43:53 +03003467 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3468 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3469
Johannes Berg2f6319d2014-11-12 23:39:56 +01003470
3471 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3472 keyconf->keyidx, sta_id);
3473
Luca Coelho71793b7d2017-03-30 12:04:47 +03003474 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3475 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3476 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
Johannes Berg2f6319d2014-11-12 23:39:56 +01003477 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3478
3479 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3480 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3481 keyconf->hw_key_idx);
3482 return -ENOENT;
3483 }
3484
Johannes Berg2dc2a152015-06-16 17:09:18 +02003485 /* track which key was deleted last */
3486 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3487 if (mvm->fw_key_deleted[i] < U8_MAX)
3488 mvm->fw_key_deleted[i]++;
3489 }
3490 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3491
David Spinadel85aeb582017-03-30 19:43:53 +03003492 if (sta && !mvm_sta) {
Johannes Berg2f6319d2014-11-12 23:39:56 +01003493 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3494 return 0;
3495 }
3496
Johannes Bergba3943b2014-11-12 23:54:48 +01003497 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3498 if (ret)
3499 return ret;
3500
3501 /* delete WEP key twice to get rid of (now useless) offset */
3502 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3503 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3504 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3505
3506 return ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003507}
3508
Johannes Berg8ca151b2013-01-24 14:25:36 +01003509void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3510 struct ieee80211_vif *vif,
3511 struct ieee80211_key_conf *keyconf,
3512 struct ieee80211_sta *sta, u32 iv32,
3513 u16 *phase1key)
3514{
Beni Levc3eb5362013-02-06 17:22:18 +02003515 struct iwl_mvm_sta *mvm_sta;
Johannes Bergba3943b2014-11-12 23:54:48 +01003516 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003517
Beni Levc3eb5362013-02-06 17:22:18 +02003518 rcu_read_lock();
3519
Johannes Berg5f7a1842015-12-11 09:36:10 +01003520 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3521 if (WARN_ON_ONCE(!mvm_sta))
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003522 goto unlock;
David Spinadel85aeb582017-03-30 19:43:53 +03003523 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003524 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003525
3526 unlock:
Beni Levc3eb5362013-02-06 17:22:18 +02003527 rcu_read_unlock();
Johannes Berg8ca151b2013-01-24 14:25:36 +01003528}
3529
Johannes Berg9cc40712013-02-15 22:47:48 +01003530void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3531 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003532{
Johannes Berg5b577a92013-11-14 18:20:04 +01003533 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003534 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003535 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003536 .sta_id = mvmsta->sta_id,
Emmanuel Grumbach5af01772013-06-09 12:59:24 +03003537 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
Johannes Berg9cc40712013-02-15 22:47:48 +01003538 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003539 };
3540 int ret;
3541
Sara Sharon854c5702016-01-26 13:17:47 +02003542 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3543 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003544 if (ret)
3545 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3546}
3547
Johannes Berg9cc40712013-02-15 22:47:48 +01003548void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3549 struct ieee80211_sta *sta,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003550 enum ieee80211_frame_release_type reason,
Johannes Berg3e56ead2013-02-15 22:23:18 +01003551 u16 cnt, u16 tids, bool more_data,
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003552 bool single_sta_queue)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003553{
Johannes Berg5b577a92013-11-14 18:20:04 +01003554 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003555 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003556 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003557 .sta_id = mvmsta->sta_id,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003558 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3559 .sleep_tx_count = cpu_to_le16(cnt),
Johannes Berg9cc40712013-02-15 22:47:48 +01003560 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003561 };
Johannes Berg3e56ead2013-02-15 22:23:18 +01003562 int tid, ret;
3563 unsigned long _tids = tids;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003564
Johannes Berg3e56ead2013-02-15 22:23:18 +01003565 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3566 * Note that this field is reserved and unused by firmware not
3567 * supporting GO uAPSD, so it's safe to always do this.
3568 */
3569 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3570 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3571
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003572 /* If we're releasing frames from aggregation or dqa queues then check
3573 * if all the queues that we're releasing frames from, combined, have:
Johannes Berg3e56ead2013-02-15 22:23:18 +01003574 * - more frames than the service period, in which case more_data
3575 * needs to be set
3576 * - fewer than 'cnt' frames, in which case we need to adjust the
3577 * firmware command (but do that unconditionally)
3578 */
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003579 if (single_sta_queue) {
Johannes Berg3e56ead2013-02-15 22:23:18 +01003580 int remaining = cnt;
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003581 int sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003582
3583 spin_lock_bh(&mvmsta->lock);
3584 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3585 struct iwl_mvm_tid_data *tid_data;
3586 u16 n_queued;
3587
3588 tid_data = &mvmsta->tid_data[tid];
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003589 if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
3590 tid_data->state != IWL_AGG_ON &&
Johannes Berg3e56ead2013-02-15 22:23:18 +01003591 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
3592 "TID %d state is %d\n",
3593 tid, tid_data->state)) {
3594 spin_unlock_bh(&mvmsta->lock);
3595 ieee80211_sta_eosp(sta);
3596 return;
3597 }
3598
Liad Kaufmandd321622017-04-05 16:25:11 +03003599 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003600 if (n_queued > remaining) {
3601 more_data = true;
3602 remaining = 0;
3603 break;
3604 }
3605 remaining -= n_queued;
3606 }
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003607 sleep_tx_count = cnt - remaining;
3608 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3609 mvmsta->sleep_tx_count = sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003610 spin_unlock_bh(&mvmsta->lock);
3611
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003612 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003613 if (WARN_ON(cnt - remaining == 0)) {
3614 ieee80211_sta_eosp(sta);
3615 return;
3616 }
3617 }
3618
3619 /* Note: this is ignored by firmware not supporting GO uAPSD */
3620 if (more_data)
Sara Sharonced19f22017-02-06 19:09:32 +02003621 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003622
3623 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3624 mvmsta->next_status_eosp = true;
Sara Sharonced19f22017-02-06 19:09:32 +02003625 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003626 } else {
Sara Sharonced19f22017-02-06 19:09:32 +02003627 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003628 }
3629
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +02003630 /* block the Tx queues until the FW updated the sleep Tx count */
3631 iwl_trans_block_txq_ptrs(mvm->trans, true);
3632
3633 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3634 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
Sara Sharon854c5702016-01-26 13:17:47 +02003635 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003636 if (ret)
3637 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3638}
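
/*
 * Worked example, with made-up queue depths, of the uAPSD accounting in
 * iwl_mvm_sta_modify_sleep_tx_count(): at most 'cnt' frames may be
 * released in this service period, so the driver walks the TIDs being
 * released, trims the count actually sent to the firmware and sets
 * MOREDATA if the queues hold more than fits in the service period.
 *
 *	cnt = 4;			// frames allowed in this SP
 *	TID 0 has 1 frame queued	// remaining: 4 -> 3
 *	TID 6 has 5 frames queued	// 5 > 3: more_data = true,
 *					//        remaining = 0
 *
 *	sleep_tx_count = cnt - remaining = 4;
 *	cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
 *	cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
 */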
Johannes Berg3e56ead2013-02-15 22:23:18 +01003639
Johannes Berg04168412015-06-23 21:22:09 +02003640void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3641 struct iwl_rx_cmd_buffer *rxb)
Johannes Berg3e56ead2013-02-15 22:23:18 +01003642{
3643 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3644 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3645 struct ieee80211_sta *sta;
3646 u32 sta_id = le32_to_cpu(notif->sta_id);
3647
3648 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
Johannes Berg04168412015-06-23 21:22:09 +02003649 return;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003650
3651 rcu_read_lock();
3652 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3653 if (!IS_ERR_OR_NULL(sta))
3654 ieee80211_sta_eosp(sta);
3655 rcu_read_unlock();
Johannes Berg3e56ead2013-02-15 22:23:18 +01003656}
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003657
3658void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3659 struct iwl_mvm_sta *mvmsta, bool disable)
3660{
3661 struct iwl_mvm_add_sta_cmd cmd = {
3662 .add_modify = STA_MODE_MODIFY,
3663 .sta_id = mvmsta->sta_id,
3664 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3665 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3666 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3667 };
3668 int ret;
3669
Sara Sharon854c5702016-01-26 13:17:47 +02003670 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3671 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003672 if (ret)
3673 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3674}
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003675
3676void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3677 struct ieee80211_sta *sta,
3678 bool disable)
3679{
3680 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3681
3682 spin_lock_bh(&mvm_sta->lock);
3683
3684 if (mvm_sta->disable_tx == disable) {
3685 spin_unlock_bh(&mvm_sta->lock);
3686 return;
3687 }
3688
3689 mvm_sta->disable_tx = disable;
3690
3691 /*
Sara Sharon0d365ae2015-03-31 12:24:05 +03003692 * Tell mac80211 to start/stop queuing tx for this station,
3693 * but don't stop queuing if there are still pending frames
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003694 * for this station.
3695 */
3696 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
3697 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3698
3699 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3700
3701 spin_unlock_bh(&mvm_sta->lock);
3702}
3703
Sara Sharonced19f22017-02-06 19:09:32 +02003704static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3705 struct iwl_mvm_vif *mvmvif,
3706 struct iwl_mvm_int_sta *sta,
3707 bool disable)
3708{
3709 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3710 struct iwl_mvm_add_sta_cmd cmd = {
3711 .add_modify = STA_MODE_MODIFY,
3712 .sta_id = sta->sta_id,
3713 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3714 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3715 .mac_id_n_color = cpu_to_le32(id),
3716 };
3717 int ret;
3718
3719 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3720 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3721 if (ret)
3722 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3723}
3724
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003725void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3726 struct iwl_mvm_vif *mvmvif,
3727 bool disable)
3728{
3729 struct ieee80211_sta *sta;
3730 struct iwl_mvm_sta *mvm_sta;
3731 int i;
3732
3733 lockdep_assert_held(&mvm->mutex);
3734
3735 /* Block/unblock all the stations of the given mvmvif */
Sara Sharon0ae98812017-01-04 14:53:58 +02003736 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003737 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3738 lockdep_is_held(&mvm->mutex));
3739 if (IS_ERR_OR_NULL(sta))
3740 continue;
3741
3742 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3743 if (mvm_sta->mac_id_n_color !=
3744 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3745 continue;
3746
3747 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3748 }
Sara Sharonced19f22017-02-06 19:09:32 +02003749
3750 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3751 return;
3752
3753 /* Need to block/unblock also multicast station */
3754 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3755 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3756 &mvmvif->mcast_sta, disable);
3757
3758 /*
3759 * Only unblock the broadcast station (FW blocks it for immediate
3760 * quiet, not the driver)
3761 */
3762 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3763 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3764 &mvmvif->bcast_sta, disable);
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003765}
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02003766
3767void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3768{
3769 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3770 struct iwl_mvm_sta *mvmsta;
3771
3772 rcu_read_lock();
3773
3774 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3775
3776 if (!WARN_ON(!mvmsta))
3777 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3778
3779 rcu_read_unlock();
3780}
Liad Kaufmandd321622017-04-05 16:25:11 +03003781
3782u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3783{
3784 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3785
3786 /*
3787	 * In A000 HW, the next_reclaimed index is only 8 bits wide, so we need
3788	 * to truncate the sn the same way before comparing the two values.
3789 */
3790 if (mvm->trans->cfg->gen2)
3791 sn &= 0xff;
3792
3793 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
3794}
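
/*
 * Illustrative example, with made-up values, for iwl_mvm_tid_queued():
 * on gen2 the 12-bit sequence number is truncated to 8 bits before the
 * wrap-aware subtraction, so it matches the 8-bit next_reclaimed index.
 *
 *	seq_number = 0x1050	-> sn = IEEE80211_SEQ_TO_SN(0x1050) = 0x105
 *	gen2:   sn &= 0xff	-> 0x05
 *	next_reclaimed = 0x01
 *
 *	ieee80211_sn_sub(0x05, 0x01) == 4	// four frames still queued
 */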