/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
	       sizeof(struct iwl_mvm_add_sta_cmd) :
	       sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

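	/* reserved_ids above is a u32 bitmap of station IDs, hence at most 32 */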
	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assume the AP's sta_id (of the sta vif) is 0 - reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

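	/*
	 * The cases below intentionally fall through: a station capable of a
	 * given bandwidth is implicitly enabled for all narrower ones as well.
	 */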
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->associated)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
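		/*
		 * The low nibble of uapsd_acs carries the trigger-enabled ACs;
		 * mirror it into the high (delivery-enabled) nibble so both
		 * sets match.
		 */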
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

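	/*
	 * If frames were received since the timer was armed, last_rx moved
	 * forward, so re-arm the timer for the remaining time instead of
	 * tearing the BA session down.
	 */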
	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

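	/* No queue found for any AC yet - byte-fill with the invalid-queue marker */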
	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise, if no redirection is required, it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

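	/*
	 * On the new (TVQM) TX path the firmware allocates the queue itself,
	 * so none of the reservation/sharing logic of the legacy path applies.
	 */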
	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	return 0;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
		}

		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
	}

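	/* The pending-frames counter is stale after a firmware restart - reset it */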
	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

Johannes Berg8ca151b2013-01-24 14:25:36 +01001309int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1310 struct ieee80211_vif *vif,
1311 struct ieee80211_sta *sta)
1312{
1313 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001314 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharona571f5f2015-12-07 12:50:58 +02001315 struct iwl_mvm_rxq_dup_data *dup_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001316 int i, ret, sta_id;
1317
1318 lockdep_assert_held(&mvm->mutex);
1319
1320 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * in which case they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

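/*
 * Tell the firmware to start/stop draining frames for @mvmsta by toggling
 * STA_FLG_DRAIN_FLOW on its station entry; the command is sent synchronously
 * and the returned status is checked below.
 */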
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity check only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

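/*
 * Deferred work that completes station removal once the firmware has drained
 * the station's frames: every station id set in mvm->sta_drained is removed
 * from the firmware and its fw_id_to_mac_id pointer cleared; for TDLS
 * stations, the queues that were kept enabled for the drain are disabled too.
 */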
void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody would
	 * clean that up later.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}

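/*
 * Disable all TX queues that were allocated to this station (DQA mode) and
 * mark the corresponding TIDs as having no queue again.
 */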
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}

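/*
 * Station removal: drain the station's frames in the firmware, flush and wait
 * for its TX queues to empty, release its DQA queues (including a queue that
 * is still only reserved), and finally remove the station from the firmware -
 * unless frames are still pending, in which case the actual removal is
 * deferred to iwl_mvm_sta_drained_wk().
 */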
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm)) {
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
			/*
			 * If pending_frames is set at this point - it must be
			 * a driver internal logic error, since the queues are
			 * empty and were removed successfully.
			 * Warn on it, but set it to 0 anyway to avoid the
			 * station not being removed later in the function.
			 */
			WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
		}

		/* If there is a TXQ still marked as reserved - free it */
		if (iwl_mvm_is_dqa_supported(mvm) &&
		    mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
			u8 reserved_txq = mvm_sta->reserved_queue;
			enum iwl_mvm_queue_status *status;

			/*
			 * If no traffic has gone through the reserved TXQ - it
			 * is still marked as IWL_MVM_QUEUE_RESERVED, and
			 * should be manually marked as free again
			 */
			spin_lock_bh(&mvm->queue_info_lock);
			status = &mvm->queue_info[reserved_txq].status;
			if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
				 (*status != IWL_MVM_QUEUE_FREE),
				 "sta_id %d reserved txq %d status %d",
				 sta_id, reserved_txq, *status)) {
				spin_unlock_bh(&mvm->queue_info_lock);
				return -EINVAL;
			}

			*status = IWL_MVM_QUEUE_FREE;
			spin_unlock_bh(&mvm->queue_info_lock);
		}

		if (vif->type == NL80211_IFTYPE_STATION &&
		    mvmvif->ap_sta_id == sta_id) {
			/* if associated - we can't remove the AP STA now */
			if (vif->bss_conf.assoc)
				return ret;

			/* unassoc - go ahead - remove the AP STA now */
			mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

			/* clear d0i3_ap_sta_id if no longer relevant */
			if (mvm->d0i3_ap_sta_id == sta_id)
				mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
		}
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);

	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

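/*
 * Allocate an internal (driver-only) station: pick a free station id (kept
 * as-is across a HW restart) and record the queue mask it will use.
 */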
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

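/*
 * Map the aux queue to its fifo. Three cases: with the new TX API the queue
 * is allocated dynamically by the firmware (TVQM), in DQA mode it is enabled
 * with a full SCD configuration, and in pre-DQA mode it is enabled as a
 * regular AC queue.
 */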
static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
		mvm->cfg->base_params->wd_timeout :
		IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
						    mvm->aux_sta.sta_id,
						    IWL_MAX_TID_COUNT,
						    wdg_timeout);
		mvm->aux_queue = queue;
	} else if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_MCAST,
			.sta_id = mvm->aux_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
				   wdg_timeout);
	} else {
		iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
	}
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_queue(mvm);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For a000 firmware and onward we cannot add a queue to a station the
	 * firmware doesn't know about, so enable the queue here - after the
	 * station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_queue(mvm);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For a000 firmware and onward we cannot add a queue to a station the
	 * firmware doesn't know about, so enable the queue here - after the
	 * station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;

		bsta->tfd_queue_msk |= BIT(queue);
	}

	return 0;
}
1962
1963static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1964 struct ieee80211_vif *vif)
1965{
1966 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1967
1968 lockdep_assert_held(&mvm->mutex);
1969
Liad Kaufman4d339982017-03-21 17:13:16 +02001970 if (vif->type == NL80211_IFTYPE_AP ||
1971 vif->type == NL80211_IFTYPE_ADHOC)
Liad Kaufmandf88c082016-11-24 15:31:00 +02001972 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
1973 IWL_MAX_TID_COUNT, 0);
1974
Sara Sharon49f71712017-01-09 12:07:16 +02001975 if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
1976 iwl_mvm_disable_txq(mvm, mvm->probe_queue,
Liad Kaufmandf88c082016-11-24 15:31:00 +02001977 vif->hw_queue[0], IWL_MAX_TID_COUNT,
1978 0);
Sara Sharon49f71712017-01-09 12:07:16 +02001979 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
Liad Kaufmandf88c082016-11-24 15:31:00 +02001980 }
1981
Sara Sharon49f71712017-01-09 12:07:16 +02001982 if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
1983 iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
Liad Kaufmandf88c082016-11-24 15:31:00 +02001984 vif->hw_queue[0], IWL_MAX_TID_COUNT,
1985 0);
Sara Sharon49f71712017-01-09 12:07:16 +02001986 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
Liad Kaufmandf88c082016-11-24 15:31:00 +02001987 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001988}
1989
1990/* Send the FW a request to remove the station from it's internal data
1991 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02001992int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001993{
Johannes Berg013290a2014-08-04 13:38:48 +02001994 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001995 int ret;
1996
1997 lockdep_assert_held(&mvm->mutex);
1998
Liad Kaufmandf88c082016-11-24 15:31:00 +02001999 if (iwl_mvm_is_dqa_supported(mvm))
2000 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2001
Johannes Berg013290a2014-08-04 13:38:48 +02002002 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002003 if (ret)
2004 IWL_WARN(mvm, "Failed sending remove station\n");
2005 return ret;
2006}
2007
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm)) {
		qmask = iwl_mvm_mac_get_queues_mask(vif);

		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included. This only happens in NL80211_IFTYPE_AP vif type,
		 * so the next line will only have an effect there.
		 */
		qmask &= ~BIT(vif->cab_queue);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		return 0;

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
		return -ENOTSUPP;

	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    IWL_MAX_TID_COUNT,
						    timeout);
		mvmvif->cab_queue = queue;
	} else {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
	}

	return 0;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		return 0;

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    IWL_MAX_TID_COUNT, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

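/*
 * Synchronously notify all RX queues that a BA session was deleted, so the
 * per-queue reorder buffers can be cleaned up before the BAID data is freed.
 */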
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}

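/*
 * Start or stop an RX BA session for @sta on @tid. On start, with the
 * multi-queue RX API, a reorder buffer is allocated per RX queue and a
 * session timer is armed from the negotiated @timeout; on stop, all RX
 * queues are synchronized before the BAID data is freed under RCU.
 */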
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		setup_timer(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired,
			    (unsigned long)&mvm->baid_map[baid]);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

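/*
 * Modify the station entry to enable/disable TX aggregation on @tid using
 * @queue. On start the queue joins the station's TFD queue mask; on stop the
 * TID is disabled again (in DQA mode the queue itself is kept).
 */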
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

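/*
 * TID to AC mapping, following the 802.11 UP-to-AC rules; the ninth entry of
 * the first table maps the internal management TID (8) to the voice AC.
 */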
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

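/*
 * addBA step 1 (session start requested): pick a TX queue for the TID - in
 * DQA mode an already-allocated queue may be reused - mark it reserved if it
 * isn't enabled yet, and either complete the handshake immediately or wait
 * for the queue to drain first.
 */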
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *     one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as
	 *     in non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
		   mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}
		/*
		 * TXQ shouldn't be in inactive mode for non-DQA, so getting
		 * an inactive queue from iwl_mvm_find_free_queue() is
		 * certainly a bug
		 */
		WARN_ON(mvm->queue_info[txq_id].status ==
			IWL_MVM_QUEUE_INACTIVE);

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

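/*
 * addBA step 2 (addBA response received): move the TID to IWL_AGG_ON, enable
 * or reconfigure the queue for the negotiated window size, and update the
 * rate-scaling data with the new aggregation frame limit.
 */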
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * On new TX API rs and BA manager are offloaded.
			 * For now though, just don't support being reconfigured
			 */
			if (iwl_mvm_has_new_tx_api(mvm))
				return -ENOTSUPP;

			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}

2675int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2676 struct ieee80211_sta *sta, u16 tid)
2677{
Johannes Berg5b577a92013-11-14 18:20:04 +01002678 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002679 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2680 u16 txq_id;
2681 int err;
2682
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02002683 /*
2684 * If mac80211 is cleaning its state, then say that we finished since
2685 * our state has been cleared anyway.
2686 */
2687 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2688 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2689 return 0;
2690 }
2691
Johannes Berg8ca151b2013-01-24 14:25:36 +01002692 spin_lock_bh(&mvmsta->lock);
2693
2694 txq_id = tid_data->txq_id;
2695
2696 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2697 mvmsta->sta_id, tid, txq_id, tid_data->state);
2698
Eyal Shapiraefed6642014-09-14 15:58:53 +03002699 mvmsta->agg_tids &= ~BIT(tid);
2700
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002701 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002702 /*
2703 * The TXQ is marked as reserved only if no traffic came through yet
2704 * This means no traffic has been sent on this TID (agg'd or not), so
2705 * we no longer have use for the queue. Since it hasn't even been
2706 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
2707 * free.
2708 */
2709 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2710 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002711
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002712 spin_unlock_bh(&mvm->queue_info_lock);
2713
Johannes Berg8ca151b2013-01-24 14:25:36 +01002714 switch (tid_data->state) {
2715 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01002716 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002717
2718 IWL_DEBUG_TX_QUEUES(mvm,
2719 "ssn = %d, next_recl = %d\n",
2720 tid_data->ssn, tid_data->next_reclaimed);
2721
2722 /* There are still packets for this RA / TID in the HW */
2723 if (tid_data->ssn != tid_data->next_reclaimed) {
2724 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
2725 err = 0;
2726 break;
2727 }
2728
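		/*
		 * All frames were reclaimed; invalidate the stored ssn
		 * (0xffff lies outside the 12-bit sequence-number space,
		 * so it can never match a real ssn).
		 */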
		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out, for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
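	/* Illustrative example: with offsets {0,1,2} all free and
	 * fw_key_deleted = {2, 5, 0}, offset 1 has been free the longest
	 * and is the one returned below.
	 */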
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
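	/*
	 * As in __iwl_mvm_remove_sta_key(), the common fields sit at the
	 * same offsets at the start of both command versions, so one
	 * union can back whichever layout the firmware understands.
	 */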
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
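		/* WEP key material is expected at offset 3 of the key
		 * buffer in this command layout, hence the "+ 3" below.
		 */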
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = mvm_sta->sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
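		/*
		 * mac80211 stores the IPN big-endian (pn[0] is the most
		 * significant byte); the firmware field is a little-endian
		 * u64, so assemble it byte by byte in reverse order.
		 */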
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

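	/*
	 * Without the new RX API the firmware only takes the shorter v1
	 * layout, so repack the fields shared between the two versions.
	 */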
	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hard-code the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
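	/* (All still-used counters age by one, saturating at U8_MAX; the
	 * offset being removed resets to 0, so iwl_mvm_set_fw_key_idx()
	 * will prefer the offset that has been free the longest.)
	 */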
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
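	/* Only the mask is set above; writing a zero flag through the
	 * STA_FLG_PS mask clears the power-save bit, i.e. marks the
	 * station as awake.
	 */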
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK.
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
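	/* Worked example (illustrative): cnt = 4 and two TIDs with 1 and 5
	 * queued frames. The first TID leaves remaining = 3; the second has
	 * more than that, so more_data is set, remaining drops to 0 and
	 * sleep_tx_count stays 4. With only 1 + 1 queued, remaining = 2 and
	 * the command is adjusted down to sleep_tx_count = 2.
	 */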
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
				 tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW has updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}