/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * A new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
        return iwl_mvm_has_new_rx_api(mvm) ?
                sizeof(struct iwl_mvm_add_sta_cmd) :
                sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

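/*
 * Find a free station ID in the fw_id_to_mac_id table, skipping any IDs
 * reserved for the given interface type. Returns IWL_MVM_INVALID_STA if
 * the table is full.
 */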
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
{
        int sta_id;
        u32 reserved_ids = 0;

        BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

        lockdep_assert_held(&mvm->mutex);

        /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. Reserve it. */
        if (iftype != NL80211_IFTYPE_STATION)
                reserved_ids = BIT(0);

        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
        for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;

                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
        return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
                .sta_id = mvm_sta->sta_id,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                                 STA_FLG_MIMO_EN_MSK),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;

        if (!update || (flags & STA_MODIFY_QUEUES)) {
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

                if (!iwl_mvm_has_new_tx_api(mvm)) {
                        add_sta_cmd.tfd_queue_msk =
                                cpu_to_le32(mvm_sta->tfd_queue_msk);

                        if (flags & STA_MODIFY_QUEUES)
                                add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
                } else {
                        WARN_ON(flags & STA_MODIFY_QUEUES);
                }
        }

        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_80:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_40:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_20:
                if (sta->ht_cap.ht_supported)
                        add_sta_cmd.station_flags |=
                                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
                break;
        }

        switch (sta->rx_nss) {
        case 1:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case 2:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
                break;
        case 3 ... 8:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
                break;
        }

        switch (sta->smps_mode) {
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
                break;
        case IEEE80211_SMPS_STATIC:
                /* override NSS */
                add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case IEEE80211_SMPS_DYNAMIC:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
                break;
        case IEEE80211_SMPS_OFF:
                /* nothing */
                break;
        }

        if (sta->ht_cap.ht_supported) {
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

                mpdu_dens = sta->ht_cap.ampdu_density;
        }

        if (sta->vht_cap.vht_supported) {
                agg_size = sta->vht_cap.cap &
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                agg_size >>=
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
        } else if (sta->ht_cap.ht_supported) {
                agg_size = sta->ht_cap.ampdu_factor;
        }

        add_sta_cmd.station_flags |=
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
        if (mvm_sta->associated)
                add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

        if (sta->wme) {
                add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BK);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BE);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VI);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VO);
                add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
                add_sta_cmd.sp_length = sta->max_sp;
        }

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "ADD_STA failed\n");
                break;
        }

        return ret;
}

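/*
 * Timer callback for RX BA session expiry: if frames arrived within twice
 * the session timeout, re-arm the timer; otherwise tear the BA session
 * down through mac80211.
 */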
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
        struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        unsigned long timeout;

        rcu_read_lock();

        ba_data = rcu_dereference(*rcu_ptr);

        if (WARN_ON(!ba_data))
                goto unlock;

        if (!ba_data->timeout)
                goto unlock;

        timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&ba_data->session_timer, timeout);
                goto unlock;
        }

        /* Timer expired */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
                                          sta->addr, ba_data->tid);
unlock:
        rcu_read_unlock();
}

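/*
 * Allocate and enable one hardware queue per AC for a TDLS station (only
 * used on the non-DQA path, where TDLS stations get dedicated queues).
 */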
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
                                 struct ieee80211_sta *sta)
{
        unsigned long used_hw_queues;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
        u32 ac;

        lockdep_assert_held(&mvm->mutex);

        used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

        /* Find available queues, and allocate them to the ACs */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                u8 queue = find_first_zero_bit(&used_hw_queues,
                                               mvm->first_agg_queue);

                if (queue >= mvm->first_agg_queue) {
                        IWL_ERR(mvm, "Failed to allocate STA queue\n");
                        return -EBUSY;
                }

                __set_bit(queue, &used_hw_queues);
                mvmsta->hw_queue[ac] = queue;
        }

        /* Found a place for all queues - enable them */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
                                      mvmsta->hw_queue[ac],
                                      iwl_mvm_ac_to_tx_fifo[ac], 0,
                                      wdg_timeout);
                mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
        }

        return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
                                    struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned long sta_msk;
        int i;

        lockdep_assert_held(&mvm->mutex);

        /* disable the TDLS STA-specific queues */
        sta_msk = mvmsta->tfd_queue_msk;
        for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
                iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u32 status;
        u8 sta_id;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return -EINVAL;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        mvmsta->tid_disable_agg |= disable_agg_tids;

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_QUEUES;
        if (disable_agg_tids)
                cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        if (remove_queue)
                cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        rcu_read_unlock();

        /* Notify FW of queue removal from the STA queues */
        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);

        return ret;
}

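/* Return a bitmap of the TIDs that have an open aggregation on this queue */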
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long agg_tids = 0;
        s8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return -EINVAL;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        agg_tids |= BIT(tid);
        }
        spin_unlock_bh(&mvmsta->lock);

        return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long disable_agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return 0;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        /* Unmap MAC queues and TIDs from this queue */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
        }

        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
        spin_unlock_bh(&mvmsta->lock);

        rcu_read_unlock();

        return disable_agg_tids;
}

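/*
 * Free a queue that was marked as inactive: disable any aggregations left
 * on it, disable the queue itself, and tell the firmware to remove it from
 * the previous station's queue set if that station is a different one.
 */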
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                                       bool same_sta)
{
        struct iwl_mvm_sta *mvmsta;
        u8 txq_curr_ac, sta_id, tid;
        unsigned long disable_agg_tids = 0;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid = mvm->queue_info[queue].txq_tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);

        disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
        /* Disable the queue */
        if (disable_agg_tids)
                iwl_mvm_invalidate_sta_queue(mvm, queue,
                                             disable_agg_tids, false);

        ret = iwl_mvm_disable_txq(mvm, queue,
                                  mvmsta->vif->hw_queue[txq_curr_ac],
                                  tid, 0);
        if (ret) {
                /* Re-mark the inactive queue as inactive */
                spin_lock_bh(&mvm->queue_info_lock);
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm,
                        "Failed to free inactive queue %d (ret=%d)\n",
                        queue, ret);

                return ret;
        }

        /* If TXQ is allocated to another STA, update removal in FW */
        if (!same_sta)
                iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

        return 0;
}

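/*
 * Choose an already-allocated DATA queue for a new TID to share, following
 * the priority order documented inside the function body.
 */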
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
        int queue = 0;
        u8 ac_to_queue[IEEE80211_NUM_ACS];
        int i;

        lockdep_assert_held(&mvm->queue_info_lock);
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

        /* See what ACs the existing queues for this STA have */
        for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
                /* Only DATA queues can be shared */
                if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;

                /* Don't try to take queues being reconfigured */
                if (mvm->queue_info[i].status ==
                    IWL_MVM_QUEUE_RECONFIGURING)
                        continue;

                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }

        /*
         * The queue to share is chosen only from DATA queues as follows (in
         * descending priority):
         * 1. An AC_BE queue
         * 2. Same AC queue
         * 3. Highest AC queue that is lower than new AC
         * 4. Any existing AC (there always is at least 1 DATA queue)
         */

        /* Priority 1: An AC_BE queue */
        if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BE];
        /* Priority 2: Same AC queue */
        else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[ac];
        /* Priority 3a: If new AC is VO and VI exists - use VI */
        else if (ac == IEEE80211_AC_VO &&
                 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 3b: No BE so only AC less than the new one is BK */
        else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BK];
        /* Priority 4a: No BE nor BK - use VI if exists */
        else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 4b: No BE, BK nor VI - use VO if exists */
        else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VO];

        /* Make sure queue found (or not) is legal */
        if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
            !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
            (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
                return -ENOSPC;
        }

        /* Make sure the queue isn't in the middle of being reconfigured */
        if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
                IWL_ERR(mvm,
                        "TXQ %d is in the middle of re-config - try again\n",
                        queue);
                return -EBUSY;
        }

        return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
                               int ac, int ssn, unsigned int wdg_timeout,
                               bool force)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        unsigned long mq;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        /*
         * If the AC is lower than current one - FIFO needs to be redirected to
         * the lowest one of the streams in the queue. Check if this is needed
         * here.
         * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
         * value 3 and VO with value 0, so to check if ac X is lower than ac Y
         * we need to check if the numerical value of X is LARGER than of Y.
         */
        spin_lock_bh(&mvm->queue_info_lock);
        if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
                spin_unlock_bh(&mvm->queue_info_lock);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "No redirection needed on TXQ #%d\n",
                                    queue);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        mq = mvm->queue_info[queue].hw_queue_to_mac80211;
        shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
        spin_unlock_bh(&mvm->queue_info_lock);

        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);

        /* Stop MAC queues and wait for this queue to empty */
        iwl_mvm_stop_mac_queues(mvm, mq);
        ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
        if (ret) {
                IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                        queue);
                ret = -EIO;
                goto out;
        }

        /* Before redirecting the queue we need to de-activate it */
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                        ret);

        /* Make sure the SCD wrptr is correctly set before reconfiguring */
        iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

        /* Update the TID "owner" of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

        /* Redirect to lower AC */
        iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                             cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
                             ssn);

        /* Update AC marking of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].mac80211_ac = ac;
        spin_unlock_bh(&mvm->queue_info_lock);

        /*
         * Mark queue as shared in transport if shared.
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there of shared
         * queues.
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
        /* Continue using the MAC queues */
        iwl_mvm_start_mac_queues(mvm, mq);

        return ret;
}

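/*
 * On the TVQM (new TX API) path the firmware allocates the queue itself;
 * just enable it and record the queue<->TID mapping in the driver.
 */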
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                        struct ieee80211_sta *sta, u8 ac,
                                        int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating queue for sta %d on tid %d\n",
                            mvmsta->sta_id, tid);
        queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
                                        wdg_timeout);
        if (queue < 0)
                return queue;

        IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        mvmsta->tfd_queue_msk |= BIT(queue);
        spin_unlock_bh(&mvmsta->lock);

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
        spin_unlock_bh(&mvm->queue_info_lock);

        return 0;
}

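/*
 * Allocate a TX queue for a station/TID in DQA mode: management frames try
 * the MGMT queue range first; data frames use the station's reserved queue
 * or a free DATA queue, falling back to sharing an existing DATA queue if
 * none is free.
 */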
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = iwl_mvm_ac_to_tx_fifo[ac],
                .sta_id = mvmsta->sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
        };
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;
        bool using_inactive_queue = false, same_sta = false;
        unsigned long disable_agg_tids = 0;
        enum iwl_mvm_agg_state queue_state;
        bool shared_queue = false;
        int ssn;
        unsigned long tfd_queue_mask;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm))
                return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

        spin_lock_bh(&mvmsta->lock);
        tfd_queue_mask = mvmsta->tfd_queue_msk;
        spin_unlock_bh(&mvmsta->lock);

        spin_lock_bh(&mvm->queue_info_lock);

        /*
         * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
         * exists
         */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control)) {
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                                            queue);

                /* If no such queue is found, we'll use a DATA queue instead */
        }

        if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
            (mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_RESERVED ||
             mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_INACTIVE)) {
                queue = mvmsta->reserved_queue;
                mvm->queue_info[queue].reserved = true;
                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
        }

        if (queue < 0)
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);

        /*
         * Check if this queue is already allocated but inactive.
         * In such a case, we'll need to first free this queue before enabling
         * it again, so we'll mark it as reserved to make sure no new traffic
         * arrives on it
         */
        if (queue > 0 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
                using_inactive_queue = true;
                same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
                                    queue, mvmsta->sta_id, tid);
        }

        /* No free queue - we'll have to share */
        if (queue <= 0) {
                queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
                if (queue > 0) {
                        shared_queue = true;
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
                }
        }

        /*
         * Mark TXQ as ready, even though it hasn't been fully configured yet,
         * to make sure no one else takes it.
         * This will allow avoiding re-acquiring the lock at the end of the
         * configuration. On error we'll mark it back as free.
         */
        if ((queue > 0) && !shared_queue)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

        spin_unlock_bh(&mvm->queue_info_lock);

        /* This shouldn't happen - out of queues */
        if (WARN_ON(queue <= 0)) {
                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                        tid, cfg.sta_id);
                return queue;
        }

        /*
         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
         * but for configuring the SCD to send A-MPDUs we need to mark the queue
         * as aggregatable.
         * Mark all DATA queues as allowing to be aggregated at some point
         */
        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

        /*
         * If this queue was previously inactive (idle) - we need to free it
         * first
         */
        if (using_inactive_queue) {
                ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
                if (ret)
                        return ret;
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating %squeue #%d to sta %d on tid %d\n",
                            shared_queue ? "shared " : "", queue,
                            mvmsta->sta_id, tid);

        if (shared_queue) {
                /* Disable any open aggs on this queue */
                disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

                if (disable_agg_tids) {
                        IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
                                            queue);
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                }
        }

        ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
        iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
                           wdg_timeout);

        /*
         * Mark queue as shared in transport if shared.
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there of shared
         * queues.
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        mvmsta->tfd_queue_msk |= BIT(queue);
        queue_state = mvmsta->tid_data[tid].state;

        if (mvmsta->reserved_queue == queue)
                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
        spin_unlock_bh(&mvmsta->lock);

        if (!shared_queue) {
                ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
                if (ret)
                        goto out_err;

                /* If we need to re-enable aggregations... */
                if (queue_state == IWL_AGG_ON) {
                        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                        if (ret)
                                goto out_err;
                }
        } else {
                /* Redirect queue, if needed */
                ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
                                                 wdg_timeout, false);
                if (ret)
                        goto out_err;
        }

        return 0;

out_err:
        iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

        return ret;
}

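/*
 * A shared queue's "owner" TID has gone inactive: tell the SCD to account
 * the queue to one of the TIDs that are still mapped to it.
 */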
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
        int tid;
        unsigned long tid_bitmap;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        spin_lock_bh(&mvm->queue_info_lock);
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                return;

        /* Find any TID for queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        cmd.tid = tid;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                        queue, ret);
                return;
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);
        IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                            queue, tid);
}

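/*
 * Unshare a queue that is left with a single TID: redirect it back to that
 * TID's own AC and, if aggregation was on, re-enable it.
 */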
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        s8 sta_id;
        int tid = -1;
        unsigned long tid_bitmap;
        unsigned int wdg_timeout;
        int ssn;
        int ret = true;

        /* queue sharing is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* Find TID for queue, and make sure it is the only one on the queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        if (tid_bitmap != BIT(tid)) {
                IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                        queue, tid_bitmap);
                return;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                            tid);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

        ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
                                         tid_to_mac80211_ac[tid], ssn,
                                         wdg_timeout, true);
        if (ret) {
                IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
                return;
        }

        /* If aggs should be turned back on - do it */
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
                struct iwl_mvm_add_sta_cmd cmd = {0};

                mvmsta->tid_disable_agg &= ~BIT(tid);

                cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
                cmd.sta_id = mvmsta->sta_id;
                cmd.add_modify = STA_MODE_MODIFY;
                cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
                cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
                cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                           iwl_mvm_add_sta_cmd_size(mvm), &cmd);
                if (!ret) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "TXQ #%d is now aggregated again\n",
                                            queue);

                        /* Mark queue internally as aggregating again */
                        iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
                }
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
        spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
        if (tid == IWL_MAX_TID_COUNT)
                return IEEE80211_AC_VO; /* MGMT */

        return tid_to_mac80211_ac[tid];
}

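/*
 * Transmit the deferred frames of one station/TID, allocating a queue
 * first if the TID doesn't have one yet; the frames are dropped if the
 * allocation fails.
 */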
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta, int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head deferred_tx;
        u8 mac_queue;
        bool no_queue = false; /* Marks if there is a problem with the queue */
        u8 ac;

        lockdep_assert_held(&mvm->mutex);

        skb = skb_peek(&tid_data->deferred_tx_frames);
        if (!skb)
                return;
        hdr = (void *)skb->data;

        ac = iwl_mvm_tid_to_ac_queue(tid);
        mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

        if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
            iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
                IWL_ERR(mvm,
                        "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
                        mvmsta->sta_id, tid);

                /*
                 * Mark queue as problematic so later the deferred traffic is
                 * freed, as we can do nothing with it
                 */
                no_queue = true;
        }

        __skb_queue_head_init(&deferred_tx);

        /* Disable bottom-halves when entering TX path */
        local_bh_disable();
        spin_lock(&mvmsta->lock);
        skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
        mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
        spin_unlock(&mvmsta->lock);

        while ((skb = __skb_dequeue(&deferred_tx)))
                if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
                        ieee80211_free_txskb(mvm->hw, skb);
        local_bh_enable();

        /* Wake queue */
        iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

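/*
 * Work item for DQA queue bookkeeping: unshare queues that can be
 * unshared, update the owner TID of shared queues that need it, and then
 * transmit any traffic that was deferred while a station was waiting for
 * a queue.
 */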
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
                                           add_stream_wk);
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long deferred_tid_traffic;
        int queue, sta_id, tid;

        /* Check inactivity of queues */
        iwl_mvm_inactivity_check(mvm);

        mutex_lock(&mvm->mutex);

        /* Reconfigure queues requiring reconfiguration */
        for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
                bool reconfig;
                bool change_owner;

                spin_lock_bh(&mvm->queue_info_lock);
                reconfig = (mvm->queue_info[queue].status ==
                            IWL_MVM_QUEUE_RECONFIGURING);

                /*
                 * We need to take into account a situation in which a TXQ was
                 * allocated to TID x, and then turned shared by adding TIDs y
                 * and z. If TID x becomes inactive and is removed from the TXQ,
                 * ownership must be given to one of the remaining TIDs.
                 * This is mainly because if TID x continues - a new queue can't
                 * be allocated for it as long as it is an owner of another TXQ.
                 */
                change_owner = !(mvm->queue_info[queue].tid_bitmap &
                                 BIT(mvm->queue_info[queue].txq_tid)) &&
                               (mvm->queue_info[queue].status ==
                                IWL_MVM_QUEUE_SHARED);
                spin_unlock_bh(&mvm->queue_info_lock);

                if (reconfig)
                        iwl_mvm_unshare_queue(mvm, queue);
                else if (change_owner)
                        iwl_mvm_change_queue_owner(mvm, queue);
        }

        /* Go over all stations with deferred traffic */
        for_each_set_bit(sta_id, mvm->sta_deferred_frames,
                         IWL_MVM_STATION_COUNT) {
                clear_bit(sta_id, mvm->sta_deferred_frames);
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

                for_each_set_bit(tid, &deferred_tid_traffic,
                                 IWL_MAX_TID_COUNT + 1)
                        iwl_mvm_tx_deferred_stream(mvm, sta, tid);
        }

        mutex_unlock(&mvm->mutex);
}

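/*
 * Reserve a queue for a station when it is first added, so that adding it
 * can't fail later because of a queue shortage; a non-TDLS client vif gets
 * the dedicated BSS client queue when that one is free.
 */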
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta,
                                      enum nl80211_iftype vif_type)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        int queue;
        bool using_inactive_queue = false, same_sta = false;

        /*
         * Check for inactive queues, so we don't reach a situation where we
         * can't add a STA due to a shortage in queues that doesn't really exist
         */
        iwl_mvm_inactivity_check(mvm);

        spin_lock_bh(&mvm->queue_info_lock);

        /* Make sure we have free resources for this STA */
        if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
            !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
            (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
             IWL_MVM_QUEUE_FREE))
                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
        else
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
        if (queue < 0) {
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm, "No available queues for new station\n");
                return -ENOSPC;
        } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                /*
                 * If this queue is already allocated but inactive we'll need to
                 * first free this queue before enabling it again, we'll mark
                 * it as reserved to make sure no new traffic arrives on it
                 */
                using_inactive_queue = true;
                same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
        }
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta->reserved_queue = queue;

        if (using_inactive_queue)
                iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

        IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
                            queue, mvmsta->sta_id);

        return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                                 struct iwl_mvm_sta *mvm_sta)
{
        unsigned int wdg_timeout =
                        iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
        int i;
        struct iwl_trans_txq_scd_cfg cfg = {
                .sta_id = mvm_sta->sta_id,
                .frame_limit = IWL_FRAME_LIMIT,
        };

        /* Make sure reserved queue is still marked as such (if allocated) */
        if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
                mvm->queue_info[mvm_sta->reserved_queue].status =
                        IWL_MVM_QUEUE_RESERVED;

        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
                int txq_id = tid_data->txq_id;
                int ac;
                u8 mac_queue;

                if (txq_id == IEEE80211_INVAL_HW_QUEUE)
                        continue;

                skb_queue_head_init(&tid_data->deferred_tx_frames);

                ac = tid_to_mac80211_ac[i];
                mac_queue = mvm_sta->vif->hw_queue[ac];

                if (iwl_mvm_has_new_tx_api(mvm)) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Re-mapping sta %d tid %d\n",
                                            mvm_sta->sta_id, i);
                        txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
                                                         mvm_sta->sta_id,
                                                         i, wdg_timeout);
                        tid_data->txq_id = txq_id;
                } else {
                        u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

                        cfg.tid = i;
                        cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
                        cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                                         txq_id ==
                                         IWL_MVM_DQA_BSS_CLIENT_QUEUE);

                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Re-mapping sta %d tid %d to queue %d\n",
                                            mvm_sta->sta_id, i, txq_id);

                        iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
                                           wdg_timeout);
                }

                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
        }

        atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

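/*
 * Add a station: allocate a free station ID (except across a HW restart,
 * where the old ID is kept), set up the driver-side state and reserve a
 * queue in DQA mode, then send the station to the firmware.
 */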
Johannes Berg8ca151b2013-01-24 14:25:36 +01001303int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1304 struct ieee80211_vif *vif,
1305 struct ieee80211_sta *sta)
1306{
1307 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001308 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharona571f5f2015-12-07 12:50:58 +02001309 struct iwl_mvm_rxq_dup_data *dup_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001310 int i, ret, sta_id;
1311
1312 lockdep_assert_held(&mvm->mutex);
1313
1314 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
Eliad Pellerb92e6612014-01-23 17:58:23 +02001315 sta_id = iwl_mvm_find_free_sta_id(mvm,
1316 ieee80211_vif_type_p2p(vif));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001317 else
1318 sta_id = mvm_sta->sta_id;
1319
Sara Sharon0ae98812017-01-04 14:53:58 +02001320 if (sta_id == IWL_MVM_INVALID_STA)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001321 return -ENOSPC;
1322
1323 spin_lock_init(&mvm_sta->lock);
1324
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001325 /* In DQA mode, if this is a HW restart, re-alloc existing queues */
1326 if (iwl_mvm_is_dqa_supported(mvm) &&
1327 test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1328 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
1329 goto update_fw;
1330 }
1331
Johannes Berg8ca151b2013-01-24 14:25:36 +01001332 mvm_sta->sta_id = sta_id;
1333 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1334 mvmvif->color);
1335 mvm_sta->vif = vif;
1336 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001337 mvm_sta->tx_protection = 0;
1338 mvm_sta->tt_tx_protection = false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001339
1340 /* HW restart, don't assume the memory has been zeroed */
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001341 atomic_set(&mvm->pending_frames[sta_id], 0);
Liad Kaufman69191af2015-09-01 18:50:22 +03001342 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001343 mvm_sta->tfd_queue_msk = 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001344
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001345 /*
1346 * Allocate new queues for a TDLS station, unless we're in DQA mode,
1347 * and then they'll be allocated dynamically
1348 */
1349 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001350 ret = iwl_mvm_tdls_sta_init(mvm, sta);
1351 if (ret)
1352 return ret;
Liad Kaufman24afba72015-07-28 18:56:08 +03001353 } else if (!iwl_mvm_is_dqa_supported(mvm)) {
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001354 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1355 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
1356 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
1357 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001358
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001359 /* for HW restart - reset everything but the sequence number */
Liad Kaufman24afba72015-07-28 18:56:08 +03001360 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001361 u16 seq = mvm_sta->tid_data[i].seq_number;
1362 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1363 mvm_sta->tid_data[i].seq_number = seq;
Liad Kaufman24afba72015-07-28 18:56:08 +03001364
1365 if (!iwl_mvm_is_dqa_supported(mvm))
1366 continue;
1367
1368 /*
1369 * Mark all queues for this STA as unallocated and defer TX
1370 * frames until the queue is allocated
1371 */
1372 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
1373 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001374 }
Liad Kaufman24afba72015-07-28 18:56:08 +03001375 mvm_sta->deferred_traffic_tid_map = 0;
Eyal Shapiraefed6642014-09-14 15:58:53 +03001376 mvm_sta->agg_tids = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001377
Sara Sharona571f5f2015-12-07 12:50:58 +02001378 if (iwl_mvm_has_new_rx_api(mvm) &&
1379 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1380 dup_data = kcalloc(mvm->trans->num_rx_queues,
1381 sizeof(*dup_data),
1382 GFP_KERNEL);
1383 if (!dup_data)
1384 return -ENOMEM;
1385 mvm_sta->dup_data = dup_data;
1386 }
1387
Liad Kaufman24afba72015-07-28 18:56:08 +03001388 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmand5216a22015-08-09 15:50:51 +03001389 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1390 ieee80211_vif_type_p2p(vif));
Liad Kaufman24afba72015-07-28 18:56:08 +03001391 if (ret)
1392 goto err;
1393 }
1394
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001395update_fw:
Liad Kaufman24afba72015-07-28 18:56:08 +03001396 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001397 if (ret)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001398 goto err;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001399
Johannes Berg9e848012014-08-04 14:33:42 +02001400 if (vif->type == NL80211_IFTYPE_STATION) {
1401 if (!sta->tdls) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001402 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001403 mvmvif->ap_sta_id = sta_id;
1404 } else {
Sara Sharon0ae98812017-01-04 14:53:58 +02001405 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001406 }
1407 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001408
1409 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1410
1411 return 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001412
1413err:
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001414 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
1415 iwl_mvm_tdls_sta_deinit(mvm, sta);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001416 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001417}
1418
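/*
 * Toggle STA_FLG_DRAIN_FLOW for a station via an ADD_STA (modify)
 * command, asking the firmware to drain the frames still pending for it
 * (the exact firmware-side behaviour is assumed here from the flag name).
 * A sketch of the sequence used by iwl_mvm_rm_sta() below:
 *
 *	iwl_mvm_drain_sta(mvm, mvm_sta, true);
 *	iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
 *	iwl_trans_wait_tx_queue_empty(mvm->trans, mvm_sta->tfd_queue_msk);
 *	iwl_mvm_drain_sta(mvm, mvm_sta, false);
 */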
1419int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1420 bool drain)
1421{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001422 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001423 int ret;
1424 u32 status;
1425
1426 lockdep_assert_held(&mvm->mutex);
1427
1428 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1429 cmd.sta_id = mvmsta->sta_id;
1430 cmd.add_modify = STA_MODE_MODIFY;
1431 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1432 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1433
1434 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001435 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1436 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001437 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001438 if (ret)
1439 return ret;
1440
Sara Sharon837c4da2016-01-07 16:50:45 +02001441 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001442 case ADD_STA_SUCCESS:
1443		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1444 mvmsta->sta_id);
1445 break;
1446 default:
1447 ret = -EIO;
1448 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1449 mvmsta->sta_id);
1450 break;
1451 }
1452
1453 return ret;
1454}
1455
1456/*
1457 * Remove a station from the FW table. Before sending the command to remove
1458 * the station validate that the station is indeed known to the driver (sanity
1459 * only).
1460 */
1461static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1462{
1463 struct ieee80211_sta *sta;
1464 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1465 .sta_id = sta_id,
1466 };
1467 int ret;
1468
1469 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1470 lockdep_is_held(&mvm->mutex));
1471
1472 /* Note: internal stations are marked as error values */
1473 if (!sta) {
1474 IWL_ERR(mvm, "Invalid station id\n");
1475 return -EINVAL;
1476 }
1477
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001478 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001479 sizeof(rm_sta_cmd), &rm_sta_cmd);
1480 if (ret) {
1481 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1482 return ret;
1483 }
1484
1485 return 0;
1486}
1487
1488void iwl_mvm_sta_drained_wk(struct work_struct *wk)
1489{
1490 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
1491 u8 sta_id;
1492
1493 /*
 1494	 * The mutex is needed because of the SYNC cmd, but not only that: if
 1495	 * this work ran concurrently with iwl_mvm_rm_sta, it could run before
 1496	 * iwl_mvm_rm_sta marks the station as busy and then exit. Afterwards
 1497	 * iwl_mvm_rm_sta would mark the station as busy, and nobody would
 1498	 * ever clean it up.
1499 */
1500 mutex_lock(&mvm->mutex);
1501
1502 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
1503 int ret;
1504 struct ieee80211_sta *sta =
1505 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1506 lockdep_is_held(&mvm->mutex));
1507
Johannes Berg1ddbbb02013-12-04 22:39:17 +01001508 /*
1509 * This station is in use or RCU-removed; the latter happens in
1510 * managed mode, where mac80211 removes the station before we
1511 * can remove it from firmware (we can only do that after the
1512 * MAC is marked unassociated), and possibly while the deauth
1513 * frame to disconnect from the AP is still queued. Then, the
1514 * station pointer is -ENOENT when the last skb is reclaimed.
1515 */
1516 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001517 continue;
1518
1519 if (PTR_ERR(sta) == -EINVAL) {
1520 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
1521 sta_id);
1522 continue;
1523 }
1524
1525 if (!sta) {
1526 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
1527 sta_id);
1528 continue;
1529 }
1530
1531 WARN_ON(PTR_ERR(sta) != -EBUSY);
1532 /* This station was removed and we waited until it got drained,
1533 * we can now proceed and remove it.
1534 */
1535 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1536 if (ret) {
1537 IWL_ERR(mvm,
1538 "Couldn't remove sta %d after it was drained\n",
1539 sta_id);
1540 continue;
1541 }
Monam Agarwalc531c772014-03-24 00:05:56 +05301542 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001543 clear_bit(sta_id, mvm->sta_drained);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001544
1545 if (mvm->tfd_drained[sta_id]) {
1546 unsigned long i, msk = mvm->tfd_drained[sta_id];
1547
Emmanuel Grumbacha4ca3ed2015-01-20 17:07:10 +02001548 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
Arik Nemtsov06ecdba2015-10-12 14:47:11 +03001549 iwl_mvm_disable_txq(mvm, i, i,
1550 IWL_MAX_TID_COUNT, 0);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001551
1552 mvm->tfd_drained[sta_id] = 0;
1553 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
1554 sta_id, msk);
1555 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001556 }
1557
1558 mutex_unlock(&mvm->mutex);
1559}
1560
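/*
 * Disable every TXQ still allocated to this station (DQA mode) and mark
 * the corresponding TIDs as unallocated again.
 */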
Liad Kaufman24afba72015-07-28 18:56:08 +03001561static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1562 struct ieee80211_vif *vif,
1563 struct iwl_mvm_sta *mvm_sta)
1564{
1565 int ac;
1566 int i;
1567
1568 lockdep_assert_held(&mvm->mutex);
1569
1570 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1571 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
1572 continue;
1573
1574 ac = iwl_mvm_tid_to_ac_queue(i);
1575 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1576 vif->hw_queue[ac], i, 0);
1577 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
1578 }
1579}
1580
Johannes Berg8ca151b2013-01-24 14:25:36 +01001581int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1582 struct ieee80211_vif *vif,
1583 struct ieee80211_sta *sta)
1584{
1585 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001586 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001587 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001588 int ret;
1589
1590 lockdep_assert_held(&mvm->mutex);
1591
Sara Sharona571f5f2015-12-07 12:50:58 +02001592 if (iwl_mvm_has_new_rx_api(mvm))
1593 kfree(mvm_sta->dup_data);
1594
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001595 if ((vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon94c3e612016-12-07 15:04:37 +02001596 mvmvif->ap_sta_id == sta_id) ||
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001597	    iwl_mvm_is_dqa_supported(mvm)) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001598 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1599 if (ret)
1600 return ret;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001601 /* flush its queues here since we are freeing mvm_sta */
Luca Coelho5888a402015-10-06 09:54:57 +03001602 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001603 if (ret)
1604 return ret;
1605 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1606 mvm_sta->tfd_queue_msk);
1607 if (ret)
1608 return ret;
1609 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001610
Liad Kaufman24afba72015-07-28 18:56:08 +03001611 /* If DQA is supported - the queues can be disabled now */
Sara Sharon94c3e612016-12-07 15:04:37 +02001612 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman56214742016-09-22 15:14:08 +03001613 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001614 /*
 1615			 * If pending_frames is set at this point - it must be
 1616			 * a driver internal logic error, since the queues are
 1617			 * empty and were removed successfully.
 1618			 * Warn on it, but set it to 0 anyway to avoid the
 1619			 * station not being removed later in the function.
1620 */
1621 WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
1622 }
Liad Kaufman56214742016-09-22 15:14:08 +03001623
1624 /* If there is a TXQ still marked as reserved - free it */
1625 if (iwl_mvm_is_dqa_supported(mvm) &&
1626 mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001627 u8 reserved_txq = mvm_sta->reserved_queue;
1628 enum iwl_mvm_queue_status *status;
1629
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001630 /*
1631 * If no traffic has gone through the reserved TXQ - it
1632 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1633 * should be manually marked as free again
1634 */
1635 spin_lock_bh(&mvm->queue_info_lock);
1636 status = &mvm->queue_info[reserved_txq].status;
1637 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1638 (*status != IWL_MVM_QUEUE_FREE),
1639 "sta_id %d reserved txq %d status %d",
Sara Sharon94c3e612016-12-07 15:04:37 +02001640 sta_id, reserved_txq, *status)) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001641 spin_unlock_bh(&mvm->queue_info_lock);
1642 return -EINVAL;
1643 }
1644
1645 *status = IWL_MVM_QUEUE_FREE;
1646 spin_unlock_bh(&mvm->queue_info_lock);
1647 }
1648
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001649 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon94c3e612016-12-07 15:04:37 +02001650 mvmvif->ap_sta_id == sta_id) {
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001651 /* if associated - we can't remove the AP STA now */
1652 if (vif->bss_conf.assoc)
1653 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001654
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001655 /* unassoc - go ahead - remove the AP STA now */
Sara Sharon0ae98812017-01-04 14:53:58 +02001656 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
Eliad Peller37577fe2013-12-05 17:19:39 +02001657
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001658 /* clear d0i3_ap_sta_id if no longer relevant */
Sara Sharon94c3e612016-12-07 15:04:37 +02001659 if (mvm->d0i3_ap_sta_id == sta_id)
Sara Sharon0ae98812017-01-04 14:53:58 +02001660 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001661 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001662 }
1663
1664 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001665 * This shouldn't happen - the TDLS channel switch should be canceled
1666 * before the STA is removed.
1667 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001668 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001669 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001670 cancel_delayed_work(&mvm->tdls_cs.dwork);
1671 }
1672
1673 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001674 * Make sure that the tx response code sees the station as -EBUSY and
1675 * calls the drain worker.
1676 */
1677 spin_lock_bh(&mvm_sta->lock);
Sara Sharon94c3e612016-12-07 15:04:37 +02001678
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001679 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001680 * There are frames pending on the AC queues for this station.
1681 * We need to wait until all the frames are drained...
1682 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001683 if (atomic_read(&mvm->pending_frames[sta_id])) {
1684 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
Johannes Berg8ca151b2013-01-24 14:25:36 +01001685 ERR_PTR(-EBUSY));
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001686 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001687
1688 /* disable TDLS sta queues on drain complete */
1689 if (sta->tdls) {
Sara Sharon94c3e612016-12-07 15:04:37 +02001690 mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
1691 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001692 }
1693
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001694 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001695 } else {
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001696 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001697
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001698 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001699 iwl_mvm_tdls_sta_deinit(mvm, sta);
1700
Johannes Berg8ca151b2013-01-24 14:25:36 +01001701 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
Monam Agarwalc531c772014-03-24 00:05:56 +05301702 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001703 }
1704
1705 return ret;
1706}
1707
1708int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1709 struct ieee80211_vif *vif,
1710 u8 sta_id)
1711{
1712 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1713
1714 lockdep_assert_held(&mvm->mutex);
1715
Monam Agarwalc531c772014-03-24 00:05:56 +05301716 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001717 return ret;
1718}
1719
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001720int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1721 struct iwl_mvm_int_sta *sta,
1722 u32 qmask, enum nl80211_iftype iftype)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001723{
1724 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02001725 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Sara Sharon0ae98812017-01-04 14:53:58 +02001726 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001727 return -ENOSPC;
1728 }
1729
1730 sta->tfd_queue_msk = qmask;
1731
1732 /* put a non-NULL value so iterating over the stations won't stop */
1733 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1734 return 0;
1735}
1736
Sara Sharon26d6c162017-01-03 12:00:19 +02001737void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001738{
Monam Agarwalc531c772014-03-24 00:05:56 +05301739 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001740 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
Sara Sharon0ae98812017-01-04 14:53:58 +02001741 sta->sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001742}
1743
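/*
 * Send the ADD_STA command for an internal station (aux, sniffer,
 * broadcast or multicast), with TX disabled on all TIDs; @addr may be
 * NULL, in which case the MAC address in the command stays zeroed.
 */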
1744static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1745 struct iwl_mvm_int_sta *sta,
1746 const u8 *addr,
1747 u16 mac_id, u16 color)
1748{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001749 struct iwl_mvm_add_sta_cmd cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001750 int ret;
1751 u32 status;
1752
1753 lockdep_assert_held(&mvm->mutex);
1754
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001755 memset(&cmd, 0, sizeof(cmd));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001756 cmd.sta_id = sta->sta_id;
1757 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1758 color));
1759
Sara Sharonbb497012016-09-29 14:52:40 +03001760 if (!iwl_mvm_has_new_tx_api(mvm))
1761 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
Liad Kaufmancf0cda12015-09-24 10:44:12 +02001762 cmd.tid_disable_tx = cpu_to_le16(0xffff);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001763
1764 if (addr)
1765 memcpy(cmd.addr, addr, ETH_ALEN);
1766
Sara Sharon854c5702016-01-26 13:17:47 +02001767 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1768 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001769 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001770 if (ret)
1771 return ret;
1772
Sara Sharon837c4da2016-01-07 16:50:45 +02001773 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001774 case ADD_STA_SUCCESS:
1775 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1776 return 0;
1777 default:
1778 ret = -EIO;
1779 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1780 status);
1781 break;
1782 }
1783 return ret;
1784}
1785
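/*
 * Enable the aux queue using whichever scheme the device supports:
 * dynamic TVQM allocation (a000 and on), DQA with an explicit SCD
 * configuration, or the legacy per-AC enablement.
 */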
Sara Sharonc5a719e2016-11-15 10:20:48 +02001786static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001787{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001788 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1789 mvm->cfg->base_params->wd_timeout :
1790 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001791
Sara Sharon310181e2017-01-17 14:27:48 +02001792 if (iwl_mvm_has_new_tx_api(mvm)) {
1793 int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
1794 mvm->aux_sta.sta_id,
1795 IWL_MAX_TID_COUNT,
1796 wdg_timeout);
1797 mvm->aux_queue = queue;
1798 } else if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman28d07932015-09-01 16:36:25 +03001799 struct iwl_trans_txq_scd_cfg cfg = {
1800 .fifo = IWL_MVM_TX_FIFO_MCAST,
1801 .sta_id = mvm->aux_sta.sta_id,
1802 .tid = IWL_MAX_TID_COUNT,
1803 .aggregate = false,
1804 .frame_limit = IWL_FRAME_LIMIT,
1805 };
1806
1807 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1808 wdg_timeout);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001809 } else {
1810 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1811 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
Liad Kaufman28d07932015-09-01 16:36:25 +03001812 }
Sara Sharonc5a719e2016-11-15 10:20:48 +02001813}
1814
1815int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1816{
1817 int ret;
1818
1819 lockdep_assert_held(&mvm->mutex);
1820
1821 /* Allocate aux station and assign to it the aux queue */
1822 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
1823 NL80211_IFTYPE_UNSPECIFIED);
1824 if (ret)
1825 return ret;
1826
1827 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1828 if (!iwl_mvm_has_new_tx_api(mvm))
1829 iwl_mvm_enable_aux_queue(mvm);
Liad Kaufman28d07932015-09-01 16:36:25 +03001830
Johannes Berg8ca151b2013-01-24 14:25:36 +01001831 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1832 MAC_INDEX_AUX, 0);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001833 if (ret) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001834 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001835 return ret;
1836 }
1837
1838 /*
 1839	 * For a000 firmware and on we cannot add a queue to a station unknown
 1840	 * to the firmware, so enable the queue here - after the station was added
1841 */
1842 if (iwl_mvm_has_new_tx_api(mvm))
1843 iwl_mvm_enable_aux_queue(mvm);
1844
1845 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001846}
1847
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001848int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1849{
1850 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1851
1852 lockdep_assert_held(&mvm->mutex);
1853 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1854 mvmvif->id, 0);
1855}
1856
1857int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1858{
1859 int ret;
1860
1861 lockdep_assert_held(&mvm->mutex);
1862
1863 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1864 if (ret)
1865 IWL_WARN(mvm, "Failed sending remove station\n");
1866
1867 return ret;
1868}
1869
1870void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1871{
1872 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1873}
1874
Johannes Berg712b24a2014-08-04 14:14:14 +02001875void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1876{
1877 lockdep_assert_held(&mvm->mutex);
1878
1879 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1880}
1881
Johannes Berg8ca151b2013-01-24 14:25:36 +01001882/*
1883 * Send the add station command for the vif's broadcast station.
1884 * Assumes that the station was already allocated.
1885 *
1886 * @mvm: the mvm component
1887 * @vif: the interface to which the broadcast station is added
1889 */
Johannes Berg013290a2014-08-04 13:38:48 +02001890int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001891{
1892 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001893 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001894 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001895 const u8 *baddr = _baddr;
Sara Sharonc5a719e2016-11-15 10:20:48 +02001896 int queue = 0;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001897 int ret;
Sara Sharonc5a719e2016-11-15 10:20:48 +02001898 unsigned int wdg_timeout =
1899 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1900 struct iwl_trans_txq_scd_cfg cfg = {
1901 .fifo = IWL_MVM_TX_FIFO_VO,
1902 .sta_id = mvmvif->bcast_sta.sta_id,
1903 .tid = IWL_MAX_TID_COUNT,
1904 .aggregate = false,
1905 .frame_limit = IWL_FRAME_LIMIT,
1906 };
Johannes Berg8ca151b2013-01-24 14:25:36 +01001907
1908 lockdep_assert_held(&mvm->mutex);
1909
Sara Sharon310181e2017-01-17 14:27:48 +02001910 if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufman4d339982017-03-21 17:13:16 +02001911 if (vif->type == NL80211_IFTYPE_AP ||
1912 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon49f71712017-01-09 12:07:16 +02001913 queue = mvm->probe_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001914 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
Sara Sharon49f71712017-01-09 12:07:16 +02001915 queue = mvm->p2p_dev_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001916 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
Liad Kaufmande24f632015-08-04 15:19:18 +03001917 return -EINVAL;
1918
Liad Kaufmandf88c082016-11-24 15:31:00 +02001919 bsta->tfd_queue_msk |= BIT(queue);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001920
Sara Sharon310181e2017-01-17 14:27:48 +02001921 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
1922 &cfg, wdg_timeout);
Liad Kaufmande24f632015-08-04 15:19:18 +03001923 }
1924
Johannes Berg5023d962013-07-31 14:07:43 +02001925 if (vif->type == NL80211_IFTYPE_ADHOC)
1926 baddr = vif->bss_conf.bssid;
1927
Sara Sharon0ae98812017-01-04 14:53:58 +02001928 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001929 return -ENOSPC;
1930
Liad Kaufmandf88c082016-11-24 15:31:00 +02001931 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1932 mvmvif->id, mvmvif->color);
1933 if (ret)
1934 return ret;
1935
1936 /*
Sara Sharonc5a719e2016-11-15 10:20:48 +02001937	 * For a000 firmware and on we cannot add a queue to a station unknown
 1938	 * to the firmware, so enable the queue here - after the station was added
Liad Kaufmandf88c082016-11-24 15:31:00 +02001939 */
Sara Sharon310181e2017-01-17 14:27:48 +02001940 if (iwl_mvm_has_new_tx_api(mvm)) {
1941 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
1942 bsta->sta_id,
1943 IWL_MAX_TID_COUNT,
1944 wdg_timeout);
1945 if (vif->type == NL80211_IFTYPE_AP)
1946 mvm->probe_queue = queue;
1947 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1948 mvm->p2p_dev_queue = queue;
1949
1950 bsta->tfd_queue_msk |= BIT(queue);
1951 }
Liad Kaufmandf88c082016-11-24 15:31:00 +02001952
1953 return 0;
1954}
1955
1956static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1957 struct ieee80211_vif *vif)
1958{
1959 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1960
1961 lockdep_assert_held(&mvm->mutex);
1962
Liad Kaufman4d339982017-03-21 17:13:16 +02001963 if (vif->type == NL80211_IFTYPE_AP ||
1964 vif->type == NL80211_IFTYPE_ADHOC)
Liad Kaufmandf88c082016-11-24 15:31:00 +02001965 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
1966 IWL_MAX_TID_COUNT, 0);
1967
Sara Sharon49f71712017-01-09 12:07:16 +02001968 if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
1969 iwl_mvm_disable_txq(mvm, mvm->probe_queue,
Liad Kaufmandf88c082016-11-24 15:31:00 +02001970 vif->hw_queue[0], IWL_MAX_TID_COUNT,
1971 0);
Sara Sharon49f71712017-01-09 12:07:16 +02001972 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
Liad Kaufmandf88c082016-11-24 15:31:00 +02001973 }
1974
Sara Sharon49f71712017-01-09 12:07:16 +02001975 if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
1976 iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
Liad Kaufmandf88c082016-11-24 15:31:00 +02001977 vif->hw_queue[0], IWL_MAX_TID_COUNT,
1978 0);
Sara Sharon49f71712017-01-09 12:07:16 +02001979 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
Liad Kaufmandf88c082016-11-24 15:31:00 +02001980 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001981}
1982
1983/* Send the FW a request to remove the station from its internal data
1984 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02001985int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001986{
Johannes Berg013290a2014-08-04 13:38:48 +02001987 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001988 int ret;
1989
1990 lockdep_assert_held(&mvm->mutex);
1991
Liad Kaufmandf88c082016-11-24 15:31:00 +02001992 if (iwl_mvm_is_dqa_supported(mvm))
1993 iwl_mvm_free_bcast_sta_queues(mvm, vif);
1994
Johannes Berg013290a2014-08-04 13:38:48 +02001995 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001996 if (ret)
1997 IWL_WARN(mvm, "Failed sending remove station\n");
1998 return ret;
1999}
2000
Johannes Berg013290a2014-08-04 13:38:48 +02002001int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2002{
2003 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Liad Kaufmande24f632015-08-04 15:19:18 +03002004 u32 qmask = 0;
Johannes Berg013290a2014-08-04 13:38:48 +02002005
2006 lockdep_assert_held(&mvm->mutex);
2007
Liad Kaufmandf88c082016-11-24 15:31:00 +02002008 if (!iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmande24f632015-08-04 15:19:18 +03002009 qmask = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002010
Liad Kaufmande24f632015-08-04 15:19:18 +03002011 /*
2012 * The firmware defines the TFD queue mask to only be relevant
2013 * for *unicast* queues, so the multicast (CAB) queue shouldn't
Liad Kaufmandf88c082016-11-24 15:31:00 +02002014 * be included. This only happens in NL80211_IFTYPE_AP vif type,
2015 * so the next line will only have an effect there.
Liad Kaufmande24f632015-08-04 15:19:18 +03002016 */
Johannes Berg013290a2014-08-04 13:38:48 +02002017 qmask &= ~BIT(vif->cab_queue);
Liad Kaufmande24f632015-08-04 15:19:18 +03002018 }
2019
Johannes Berg013290a2014-08-04 13:38:48 +02002020 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
2021 ieee80211_vif_type_p2p(vif));
2022}
2023
Johannes Berg8ca151b2013-01-24 14:25:36 +01002024/* Allocate a new station entry for the broadcast station to the given vif,
2025 * and send it to the FW.
2026 * Note that each P2P mac should have its own broadcast station.
2027 *
2028 * @mvm: the mvm component
2029 * @vif: the interface to which the broadcast station is added
 2030 */
Johannes Berg013290a2014-08-04 13:38:48 +02002031int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002032{
2033 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002034 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002035 int ret;
2036
2037 lockdep_assert_held(&mvm->mutex);
2038
Johannes Berg013290a2014-08-04 13:38:48 +02002039 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002040 if (ret)
2041 return ret;
2042
Johannes Berg013290a2014-08-04 13:38:48 +02002043 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002044
2045 if (ret)
2046 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02002047
Johannes Berg8ca151b2013-01-24 14:25:36 +01002048 return ret;
2049}
2050
Johannes Berg013290a2014-08-04 13:38:48 +02002051void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2052{
2053 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2054
2055 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2056}
2057
Johannes Berg8ca151b2013-01-24 14:25:36 +01002058/*
 2059 * Send the FW a request to remove the station from its internal data
2060 * structures, and in addition remove it from the local data structure.
2061 */
Johannes Berg013290a2014-08-04 13:38:48 +02002062int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002063{
2064 int ret;
2065
2066 lockdep_assert_held(&mvm->mutex);
2067
Johannes Berg013290a2014-08-04 13:38:48 +02002068 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002069
Johannes Berg013290a2014-08-04 13:38:48 +02002070 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2071
Johannes Berg8ca151b2013-01-24 14:25:36 +01002072 return ret;
2073}
2074
Sara Sharon26d6c162017-01-03 12:00:19 +02002075/*
2076 * Allocate a new station entry for the multicast station to the given vif,
2077 * and send it to the FW.
2078 * Note that each AP/GO mac should have its own multicast station.
2079 *
2080 * @mvm: the mvm component
2081 * @vif: the interface to which the multicast station is added
2082 */
2083int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2084{
2085 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2086 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2087 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2088 const u8 *maddr = _maddr;
2089 struct iwl_trans_txq_scd_cfg cfg = {
2090 .fifo = IWL_MVM_TX_FIFO_MCAST,
2091 .sta_id = msta->sta_id,
2092 .tid = IWL_MAX_TID_COUNT,
2093 .aggregate = false,
2094 .frame_limit = IWL_FRAME_LIMIT,
2095 };
2096 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2097 int ret;
2098
2099 lockdep_assert_held(&mvm->mutex);
2100
2101 if (!iwl_mvm_is_dqa_supported(mvm))
2102 return 0;
2103
2104 if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
2105 return -ENOTSUPP;
2106
2107 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2108 mvmvif->id, mvmvif->color);
2109 if (ret) {
2110 iwl_mvm_dealloc_int_sta(mvm, msta);
2111 return ret;
2112 }
2113
2114 /*
2115 * Enable cab queue after the ADD_STA command is sent.
2116 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
2117 * command with unknown station id.
2118 */
Sara Sharon310181e2017-01-17 14:27:48 +02002119 if (iwl_mvm_has_new_tx_api(mvm)) {
2120 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2121 msta->sta_id,
2122 IWL_MAX_TID_COUNT,
2123 timeout);
2124 vif->cab_queue = queue;
2125 } else {
2126 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2127 &cfg, timeout);
2128 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002129
2130 return 0;
2131}
2132
2133/*
 2134 * Send the FW a request to remove the station from its internal data
2135 * structures, and in addition remove it from the local data structure.
2136 */
2137int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2138{
2139 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2140 int ret;
2141
2142 lockdep_assert_held(&mvm->mutex);
2143
2144 if (!iwl_mvm_is_dqa_supported(mvm))
2145 return 0;
2146
2147 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
2148 IWL_MAX_TID_COUNT, 0);
2149
2150 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2151 if (ret)
2152 IWL_WARN(mvm, "Failed sending remove station\n");
2153
2154 return ret;
2155}
2156
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002157#define IWL_MAX_RX_BA_SESSIONS 16
2158
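/*
 * Synchronously notify all RX queues that a BAID is being removed, so
 * that per-queue processing of that BAID stops before its reorder data
 * is freed.
 */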
Sara Sharonb915c102016-03-23 16:32:02 +02002159static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02002160{
Sara Sharonb915c102016-03-23 16:32:02 +02002161 struct iwl_mvm_delba_notif notif = {
2162 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2163 .metadata.sync = 1,
2164 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02002165 };
Sara Sharonb915c102016-03-23 16:32:02 +02002166 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
 2167}
Sara Sharon10b2b202016-03-20 16:23:41 +02002168
Sara Sharonb915c102016-03-23 16:32:02 +02002169static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2170 struct iwl_mvm_baid_data *data)
2171{
2172 int i;
2173
2174 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2175
2176 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2177 int j;
2178 struct iwl_mvm_reorder_buffer *reorder_buf =
2179 &data->reorder_buf[i];
2180
Sara Sharon06904052016-02-28 20:28:17 +02002181 spin_lock_bh(&reorder_buf->lock);
2182 if (likely(!reorder_buf->num_stored)) {
2183 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02002184 continue;
Sara Sharon06904052016-02-28 20:28:17 +02002185 }
Sara Sharonb915c102016-03-23 16:32:02 +02002186
2187 /*
2188 * This shouldn't happen in regular DELBA since the internal
2189 * delBA notification should trigger a release of all frames in
2190 * the reorder buffer.
2191 */
2192 WARN_ON(1);
2193
2194 for (j = 0; j < reorder_buf->buf_size; j++)
2195 __skb_queue_purge(&reorder_buf->entries[j]);
Sara Sharon06904052016-02-28 20:28:17 +02002196 /*
 2197		 * Prevent timer re-arm. This prevents a very far-fetched case
2198 * where we timed out on the notification. There may be prior
2199 * RX frames pending in the RX queue before the notification
2200 * that might get processed between now and the actual deletion
2201 * and we would re-arm the timer although we are deleting the
2202 * reorder buffer.
2203 */
2204 reorder_buf->removed = true;
2205 spin_unlock_bh(&reorder_buf->lock);
2206 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02002207 }
2208}
2209
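/*
 * Initialize one reorder buffer per RX queue for a new BA session:
 * empty, starting at the negotiated SSN, each with its own lock and
 * expiry timer.
 */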
2210static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2211 u32 sta_id,
2212 struct iwl_mvm_baid_data *data,
2213 u16 ssn, u8 buf_size)
2214{
2215 int i;
2216
2217 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2218 struct iwl_mvm_reorder_buffer *reorder_buf =
2219 &data->reorder_buf[i];
2220 int j;
2221
2222 reorder_buf->num_stored = 0;
2223 reorder_buf->head_sn = ssn;
2224 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02002225 /* rx reorder timer */
2226 reorder_buf->reorder_timer.function =
2227 iwl_mvm_reorder_timer_expired;
2228 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
2229 init_timer(&reorder_buf->reorder_timer);
2230 spin_lock_init(&reorder_buf->lock);
2231 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02002232 reorder_buf->queue = i;
2233 reorder_buf->sta_id = sta_id;
2234 for (j = 0; j < reorder_buf->buf_size; j++)
2235 __skb_queue_head_init(&reorder_buf->entries[j]);
2236 }
Sara Sharon10b2b202016-03-20 16:23:41 +02002237}
2238
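/*
 * Start or stop an RX BlockAck session for a station/TID. On devices
 * with the new RX API this also allocates (on start) or tears down (on
 * stop) the per-queue reorder buffers, keyed by the BAID returned in
 * the ADD_STA response.
 */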
Johannes Berg8ca151b2013-01-24 14:25:36 +01002239int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Sara Sharon10b2b202016-03-20 16:23:41 +02002240 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002241{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002242 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002243 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002244 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002245 int ret;
2246 u32 status;
2247
2248 lockdep_assert_held(&mvm->mutex);
2249
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002250 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2251 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2252 return -ENOSPC;
2253 }
2254
Sara Sharon10b2b202016-03-20 16:23:41 +02002255 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2256 /*
2257 * Allocate here so if allocation fails we can bail out early
2258 * before starting the BA session in the firmware
2259 */
Sara Sharonb915c102016-03-23 16:32:02 +02002260 baid_data = kzalloc(sizeof(*baid_data) +
2261 mvm->trans->num_rx_queues *
2262 sizeof(baid_data->reorder_buf[0]),
2263 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002264 if (!baid_data)
2265 return -ENOMEM;
2266 }
2267
Johannes Berg8ca151b2013-01-24 14:25:36 +01002268 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2269 cmd.sta_id = mvm_sta->sta_id;
2270 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002271 if (start) {
2272 cmd.add_immediate_ba_tid = (u8) tid;
2273 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Sara Sharon854c5702016-01-26 13:17:47 +02002274 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002275 } else {
2276 cmd.remove_immediate_ba_tid = (u8) tid;
2277 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002278 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2279 STA_MODIFY_REMOVE_BA_TID;
2280
2281 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002282 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2283 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002284 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002285 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002286 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002287
Sara Sharon837c4da2016-01-07 16:50:45 +02002288 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002289 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002290 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2291 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002292 break;
2293 case ADD_STA_IMMEDIATE_BA_FAILURE:
2294 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2295 ret = -ENOSPC;
2296 break;
2297 default:
2298 ret = -EIO;
2299 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2300 start ? "start" : "stopp", status);
2301 break;
2302 }
2303
Sara Sharon10b2b202016-03-20 16:23:41 +02002304 if (ret)
2305 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002306
Sara Sharon10b2b202016-03-20 16:23:41 +02002307 if (start) {
2308 u8 baid;
2309
2310 mvm->rx_ba_sessions++;
2311
2312 if (!iwl_mvm_has_new_rx_api(mvm))
2313 return 0;
2314
2315 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2316 ret = -EINVAL;
2317 goto out_free;
2318 }
2319 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2320 IWL_ADD_STA_BAID_SHIFT);
2321 baid_data->baid = baid;
2322 baid_data->timeout = timeout;
2323 baid_data->last_rx = jiffies;
Wei Yongjun72c240f2016-07-12 11:40:57 +00002324 setup_timer(&baid_data->session_timer,
2325 iwl_mvm_rx_agg_session_expired,
2326 (unsigned long)&mvm->baid_map[baid]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002327 baid_data->mvm = mvm;
2328 baid_data->tid = tid;
2329 baid_data->sta_id = mvm_sta->sta_id;
2330
2331 mvm_sta->tid_to_baid[tid] = baid;
2332 if (timeout)
2333 mod_timer(&baid_data->session_timer,
2334 TU_TO_EXP_TIME(timeout * 2));
2335
Sara Sharonb915c102016-03-23 16:32:02 +02002336 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2337 baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002338 /*
2339 * protect the BA data with RCU to cover a case where our
 2340		 * internal RX sync mechanism times out (not that it's
2341 * supposed to happen) and we will free the session data while
2342 * RX is being processed in parallel
2343 */
Sara Sharon35263a02016-06-21 12:12:10 +03002344 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2345 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002346 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2347 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002348 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002349 u8 baid = mvm_sta->tid_to_baid[tid];
2350
Sara Sharon60dec522016-06-21 14:14:08 +03002351 if (mvm->rx_ba_sessions > 0)
2352 /* check that restart flow didn't zero the counter */
2353 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002354 if (!iwl_mvm_has_new_rx_api(mvm))
2355 return 0;
2356
2357 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2358 return -EINVAL;
2359
2360 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2361 if (WARN_ON(!baid_data))
2362 return -EINVAL;
2363
2364 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002365 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002366 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002367 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2368 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002369 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002370 }
2371 return 0;
2372
2373out_free:
2374 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002375 return ret;
2376}
2377
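/*
 * Modify the station in the firmware to enable or disable TX
 * aggregation on one TID, updating the driver's TFD queue mask and
 * tid_disable_agg bookkeeping along the way.
 */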
Liad Kaufman9794c642015-08-19 17:34:28 +03002378int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2379 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002380{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002381 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002382 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002383 int ret;
2384 u32 status;
2385
2386 lockdep_assert_held(&mvm->mutex);
2387
2388 if (start) {
2389 mvm_sta->tfd_queue_msk |= BIT(queue);
2390 mvm_sta->tid_disable_agg &= ~BIT(tid);
2391 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002392 /* In DQA-mode the queue isn't removed on agg termination */
2393 if (!iwl_mvm_is_dqa_supported(mvm))
2394 mvm_sta->tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002395 mvm_sta->tid_disable_agg |= BIT(tid);
2396 }
2397
2398 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2399 cmd.sta_id = mvm_sta->sta_id;
2400 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002401 if (!iwl_mvm_has_new_tx_api(mvm))
2402 cmd.modify_mask = STA_MODIFY_QUEUES;
2403 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002404 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2405 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2406
2407 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002408 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2409 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002410 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002411 if (ret)
2412 return ret;
2413
Sara Sharon837c4da2016-01-07 16:50:45 +02002414 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002415 case ADD_STA_SUCCESS:
2416 break;
2417 default:
2418 ret = -EIO;
2419 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2420 start ? "start" : "stopp", status);
2421 break;
2422 }
2423
2424 return ret;
2425}
2426
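/*
 * Standard IEEE 802.11 TID to AC mapping, with the addition of TID 8
 * used internally for management frames (treated as AC_VO).
 */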
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002427const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002428 IEEE80211_AC_BE,
2429 IEEE80211_AC_BK,
2430 IEEE80211_AC_BK,
2431 IEEE80211_AC_BE,
2432 IEEE80211_AC_VI,
2433 IEEE80211_AC_VI,
2434 IEEE80211_AC_VO,
2435 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002436 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002437};
2438
Johannes Berg3e56ead2013-02-15 22:23:18 +01002439static const u8 tid_to_ucode_ac[] = {
2440 AC_BE,
2441 AC_BK,
2442 AC_BK,
2443 AC_BE,
2444 AC_VI,
2445 AC_VI,
2446 AC_VO,
2447 AC_VO,
2448};
2449
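/*
 * AMPDU TX start handler (reached from mac80211's ampdu_action): pick
 * or reuse a TXQ for the session and report the starting SSN back.
 * The ADDBA request is signalled to mac80211 immediately only if the
 * queue has already drained up to that SSN; otherwise the state stays
 * at IWL_EMPTYING_HW_QUEUE_ADDBA until it does.
 */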
Johannes Berg8ca151b2013-01-24 14:25:36 +01002450int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2451 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2452{
Johannes Berg5b577a92013-11-14 18:20:04 +01002453 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002454 struct iwl_mvm_tid_data *tid_data;
2455 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002456 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002457
2458 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2459 return -EINVAL;
2460
2461 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2462 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
2463 mvmsta->tid_data[tid].state);
2464 return -ENXIO;
2465 }
2466
2467 lockdep_assert_held(&mvm->mutex);
2468
Arik Nemtsovb2492502014-03-13 12:21:50 +02002469 spin_lock_bh(&mvmsta->lock);
2470
2471 /* possible race condition - we entered D0i3 while starting agg */
2472 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2473 spin_unlock_bh(&mvmsta->lock);
2474 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2475 return -EIO;
2476 }
2477
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002478 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002479
Liad Kaufmancf961e12015-08-13 19:16:08 +03002480 /*
2481 * Note the possible cases:
2482 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2483 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2484 * one and mark it as reserved
2485 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
2486 * non-DQA mode, since the TXQ hasn't yet been allocated
2487 */
2488 txq_id = mvmsta->tid_data[tid].txq_id;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002489 if (iwl_mvm_is_dqa_supported(mvm) &&
2490 unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
2491 ret = -ENXIO;
2492 IWL_DEBUG_TX_QUEUES(mvm,
2493 "Can't start tid %d agg on shared queue!\n",
2494 tid);
2495 goto release_locks;
2496 } else if (!iwl_mvm_is_dqa_supported(mvm) ||
Liad Kaufmancf961e12015-08-13 19:16:08 +03002497 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002498 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2499 mvm->first_agg_queue,
Liad Kaufmancf961e12015-08-13 19:16:08 +03002500 mvm->last_agg_queue);
2501 if (txq_id < 0) {
2502 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002503 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2504 goto release_locks;
2505 }
Sara Sharon01796ff2016-11-16 17:04:36 +02002506 /*
2507 * TXQ shouldn't be in inactive mode for non-DQA, so getting
2508 * an inactive queue from iwl_mvm_find_free_queue() is
2509 * certainly a bug
2510 */
2511 WARN_ON(mvm->queue_info[txq_id].status ==
2512 IWL_MVM_QUEUE_INACTIVE);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002513
2514 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2515 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002516 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002517
2518 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002519
Liad Kaufmancf961e12015-08-13 19:16:08 +03002520 IWL_DEBUG_TX_QUEUES(mvm,
2521 "AGG for tid %d will be on queue #%d\n",
2522 tid, txq_id);
2523
Johannes Berg8ca151b2013-01-24 14:25:36 +01002524 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002525 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002526 tid_data->txq_id = txq_id;
2527 *ssn = tid_data->ssn;
2528
2529 IWL_DEBUG_TX_QUEUES(mvm,
2530 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2531 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2532 tid_data->next_reclaimed);
2533
2534 if (tid_data->ssn == tid_data->next_reclaimed) {
2535 tid_data->state = IWL_AGG_STARTING;
2536 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2537 } else {
2538 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2539 }
2540
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002541 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002542 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002543
2544release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002545 spin_unlock(&mvm->queue_info_lock);
2546out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002547 spin_unlock_bh(&mvmsta->lock);
2548
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002549 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002550}
2551
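/*
 * AMPDU TX operational handler: the ADDBA handshake completed, so
 * (re)configure the TXQ for aggregation with the negotiated buffer
 * size and enable aggregation on the TID in the firmware.
 */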
2552int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002553 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2554 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002555{
Johannes Berg5b577a92013-11-14 18:20:04 +01002556 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002557 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002558 unsigned int wdg_timeout =
2559 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002560 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002561 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002562 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002563 u16 ssn;
2564
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002565 struct iwl_trans_txq_scd_cfg cfg = {
2566 .sta_id = mvmsta->sta_id,
2567 .tid = tid,
2568 .frame_limit = buf_size,
2569 .aggregate = true,
2570 };
2571
Eyal Shapiraefed6642014-09-14 15:58:53 +03002572 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2573 != IWL_MAX_TID_COUNT);
2574
Johannes Berg8ca151b2013-01-24 14:25:36 +01002575 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2576
2577 spin_lock_bh(&mvmsta->lock);
2578 ssn = tid_data->ssn;
2579 queue = tid_data->txq_id;
2580 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002581 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002582 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002583 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002584 spin_unlock_bh(&mvmsta->lock);
2585
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002586 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01002587
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002588 spin_lock_bh(&mvm->queue_info_lock);
2589 queue_status = mvm->queue_info[queue].status;
2590 spin_unlock_bh(&mvm->queue_info_lock);
2591
Liad Kaufmancf961e12015-08-13 19:16:08 +03002592 /* In DQA mode, the existing queue might need to be reconfigured */
2593 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002594 /* Maybe there is no need to even alloc a queue... */
 2595		if (queue_status == IWL_MVM_QUEUE_READY)
2596 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002597
2598 /*
2599 * Only reconfig the SCD for the queue if the window size has
2600 * changed from current (become smaller)
2601 */
2602 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2603 /*
Sara Sharonbb497012016-09-29 14:52:40 +03002604			 * With the new TX API, rs and the BA manager are offloaded.
2605 * For now though, just don't support being reconfigured
2606 */
2607 if (iwl_mvm_has_new_tx_api(mvm))
2608 return -ENOTSUPP;
2609
2610 /*
Liad Kaufmancf961e12015-08-13 19:16:08 +03002611 * If reconfiguring an existing queue, it first must be
2612 * drained
2613 */
2614 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
2615 BIT(queue));
2616 if (ret) {
2617 IWL_ERR(mvm,
2618 "Error draining queue before reconfig\n");
2619 return ret;
2620 }
2621
2622 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2623 mvmsta->sta_id, tid,
2624 buf_size, ssn);
2625 if (ret) {
2626 IWL_ERR(mvm,
2627 "Error reconfiguring TXQ #%d\n", queue);
2628 return ret;
2629 }
2630 }
2631 }
2632
2633 if (alloc_queue)
2634 iwl_mvm_enable_txq(mvm, queue,
2635 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2636 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03002637
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002638 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2639 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2640 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2641 if (ret)
2642 return -EIO;
2643 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002644
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002645 /* No need to mark as reserved */
2646 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002647 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002648 spin_unlock_bh(&mvm->queue_info_lock);
2649
Johannes Berg8ca151b2013-01-24 14:25:36 +01002650 /*
2651 * Even though in theory the peer could have different
2652 * aggregation reorder buffer sizes for different sessions,
2653 * our ucode doesn't allow for that and has a global limit
2654 * for each station. Therefore, use the minimum of all the
2655 * aggregation sessions and our default value.
2656 */
2657 mvmsta->max_agg_bufsize =
2658 min(mvmsta->max_agg_bufsize, buf_size);
2659 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2660
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03002661 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2662 sta->addr, tid);
2663
Eyal Shapira9e680942013-11-09 00:16:16 +02002664 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002665}
2666
2667int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2668 struct ieee80211_sta *sta, u16 tid)
2669{
Johannes Berg5b577a92013-11-14 18:20:04 +01002670 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002671 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2672 u16 txq_id;
2673 int err;
2674
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02002675 /*
2676 * If mac80211 is cleaning its state, then say that we finished since
2677 * our state has been cleared anyway.
2678 */
2679 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2680 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2681 return 0;
2682 }
2683
Johannes Berg8ca151b2013-01-24 14:25:36 +01002684 spin_lock_bh(&mvmsta->lock);
2685
2686 txq_id = tid_data->txq_id;
2687
2688 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2689 mvmsta->sta_id, tid, txq_id, tid_data->state);
2690
Eyal Shapiraefed6642014-09-14 15:58:53 +03002691 mvmsta->agg_tids &= ~BIT(tid);
2692
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002693 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002694 /*
 2695	 * The TXQ is marked as reserved only if no traffic came through yet.
 2696	 * This means no traffic has been sent on this TID (agg'd or not), so
 2697	 * we no longer have use for the queue. It hasn't even been
 2698	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
 2699	 * free.
2700 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;

	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

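/*
 * Illustrative summary of the teardown state machine implemented above
 * (the state names are real, the diagram is an editor's sketch):
 *
 *	IWL_AGG_ON --(queue already drained)---------------> IWL_AGG_OFF
 *	IWL_AGG_ON --(frames left in HW)--> IWL_EMPTYING_HW_QUEUE_DELBA,
 *	    completed to IWL_AGG_OFF later from the reclaim path
 *	    (see iwl_mvm_check_ratid_empty, referenced below)
 *	IWL_AGG_STARTING / IWL_EMPTYING_HW_QUEUE_ADDBA ----> IWL_AGG_OFF
 *	    (session stopped before setup finished, e.g. AddBA timeout)
 */
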
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

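/*
 * A minimal standalone sketch of the slot-picking policy above, for
 * illustration only (toy table sizes and names, not a driver API):
 */
#if 0	/* example only, not compiled */
static int example_pick_lru_slot(const u8 *deleted, const unsigned long *used,
				 int nslots)
{
	int i, max = -1, max_offs = -1;

	for (i = 0; i < nslots; i++) {
		if (test_bit(i, used))
			continue;		/* slot holds a live key */
		if (deleted[i] > max) {		/* oldest deletion wins */
			max = deleted[i];
			max_offs = i;
		}
	}

	return max_offs;			/* -1 when nothing is free */
}
#endif
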
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
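		/*
		 * A TKIP key from mac80211 is 32 bytes: the 16-byte
		 * temporal key followed by the Tx and Rx MIC keys,
		 * which are copied out separately below using the
		 * NL80211_TKIP_DATA_OFFSET_* constants.
		 */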
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = mvm_sta->sta_id;

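	/*
	 * Both command versions start with the same 'common' layout, so
	 * only the size sent to the firmware differs between the APIs.
	 */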
	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
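		/*
		 * The shifts below treat pn[0] as the most significant
		 * byte of the 6-byte PN, packing it into a little-endian
		 * u64 for the firmware; e.g. pn = {0, 0, 0, 0, 0x01, 0x02}
		 * yields a replay counter of 0x0102.
		 */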
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

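/*
 * Typical key_offset usage (illustrative, per the comment inside
 * iwl_mvm_set_sta_key() above): a normal install passes
 * STA_KEY_IDX_INVALID so that iwl_mvm_set_fw_key_idx() picks a free
 * slot, HW_RESTART passes the previously recorded indices to reuse
 * them, and D3 entry hardcodes the indices because the firmware fixes
 * the PTK offset to 0.
 */
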
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
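	/*
	 * (Worked example: counters {3, 1, 0} age to {4, 2, 1} below,
	 * then the slot being freed - say slot 2 - is zeroed, giving
	 * {4, 2, 0}, so iwl_mvm_set_fw_key_idx() keeps preferring the
	 * slot that has been free the longest.)
	 */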
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
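	/*
	 * Worked example (illustrative): cnt == 4 with two released TIDs
	 * queueing 2 and 3 frames. The first TID leaves remaining == 2;
	 * the second exceeds that, so more_data is set and sleep_tx_count
	 * stays at cnt. Had the TIDs queued only 1 + 1 frames,
	 * sleep_tx_count would be adjusted down to 2 instead.
	 */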
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
				 tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}