/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * Newer versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
        if (iwl_mvm_has_new_rx_api(mvm) ||
            fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                return sizeof(struct iwl_mvm_add_sta_cmd);
        else
                return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

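/*
 * Callers below pass iwl_mvm_add_sta_cmd_size(mvm) as the payload length
 * of every ADD_STA command, e.g.
 *
 *	iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *				    iwl_mvm_add_sta_cmd_size(mvm),
 *				    &cmd, &status);
 *
 * so firmware using the v7 API is never handed the newer fields.
 */

/* Find a free station ID, skipping any IDs reserved for this interface type */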
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
{
        int sta_id;
        u32 reserved_ids = 0;

        BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

        lockdep_assert_held(&mvm->mutex);

        /* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0 - reserve it */
        if (iftype != NL80211_IFTYPE_STATION)
                reserved_ids = BIT(0);

        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
        for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;

                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
        return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
                .sta_id = mvm_sta->sta_id,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                                 STA_FLG_MIMO_EN_MSK |
                                                 STA_FLG_RTS_MIMO_PROT),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;

        if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                add_sta_cmd.station_type = mvm_sta->sta_type;

        if (!update || (flags & STA_MODIFY_QUEUES)) {
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

                if (!iwl_mvm_has_new_tx_api(mvm)) {
                        add_sta_cmd.tfd_queue_msk =
                                cpu_to_le32(mvm_sta->tfd_queue_msk);

                        if (flags & STA_MODIFY_QUEUES)
                                add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
                } else {
                        WARN_ON(flags & STA_MODIFY_QUEUES);
                }
        }

        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_80:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_40:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_20:
                if (sta->ht_cap.ht_supported)
                        add_sta_cmd.station_flags |=
                                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
                break;
        }

        switch (sta->rx_nss) {
        case 1:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case 2:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
                break;
        case 3 ... 8:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
                break;
        }

        switch (sta->smps_mode) {
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
                break;
        case IEEE80211_SMPS_STATIC:
                /* override NSS */
                add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case IEEE80211_SMPS_DYNAMIC:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
                break;
        case IEEE80211_SMPS_OFF:
                /* nothing */
                break;
        }

        if (sta->ht_cap.ht_supported) {
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

                mpdu_dens = sta->ht_cap.ampdu_density;
        }

        if (sta->vht_cap.vht_supported) {
                agg_size = sta->vht_cap.cap &
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                agg_size >>=
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
        } else if (sta->ht_cap.ht_supported) {
                agg_size = sta->ht_cap.ampdu_factor;
        }

        add_sta_cmd.station_flags |=
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
        if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
                add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

        if (sta->wme) {
                add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BK);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BE);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VI);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VO);
                add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
                add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
        }

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "ADD_STA failed\n");
                break;
        }

        return ret;
}

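/*
 * Per-BAID RX aggregation session timer: rearms itself for as long as
 * frames keep arriving, and notifies mac80211 once the session has
 * genuinely timed out.
 */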
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
        struct iwl_mvm_baid_data *data =
                from_timer(data, t, session_timer);
        struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        unsigned long timeout;

        rcu_read_lock();

        ba_data = rcu_dereference(*rcu_ptr);

        if (WARN_ON(!ba_data))
                goto unlock;

        if (!ba_data->timeout)
                goto unlock;

        timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&ba_data->session_timer, timeout);
                goto unlock;
        }

        /* Timer expired */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

        /*
         * sta should be valid unless the following happens:
         * The firmware asserts which triggers a reconfig flow, but
         * the reconfig fails before we set the pointer to sta into
         * the fw_id_to_mac_id pointer table. mac80211 can't stop
         * A-MPDU and hence the timer continues to run. Then, the
         * timer expires and sta is NULL.
         */
        if (!sta)
                goto unlock;

        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_rx_ba_timer_expired(mvm_sta->vif,
                                      sta->addr, ba_data->tid);
unlock:
        rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u32 status;
        u8 sta_id;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return -EINVAL;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        mvmsta->tid_disable_agg |= disable_agg_tids;

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_QUEUES;
        if (disable_agg_tids)
                cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        if (remove_queue)
                cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        rcu_read_unlock();

        /* Notify FW of queue removal from the STA queues */
        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);

        return ret;
}

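/* Return a bitmap of the TIDs that have an aggregation open on this queue */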
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return -EINVAL;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        agg_tids |= BIT(tid);
        }
        spin_unlock_bh(&mvmsta->lock);

        return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and it doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long disable_agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return 0;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        /* Unmap MAC queues and TIDs from this queue */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
        }

        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
        spin_unlock_bh(&mvmsta->lock);

        rcu_read_unlock();

        return disable_agg_tids;
}

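/*
 * Free a queue that was marked inactive: tear down any open aggregations
 * and disable it, telling the firmware to remove it from its previous
 * station's queues if it now belongs to a different one.
 */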
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                                       bool same_sta)
{
        struct iwl_mvm_sta *mvmsta;
        u8 txq_curr_ac, sta_id, tid;
        unsigned long disable_agg_tids = 0;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid = mvm->queue_info[queue].txq_tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
        if (WARN_ON(!mvmsta))
                return -EINVAL;

        disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
        /* Disable the queue */
        if (disable_agg_tids)
                iwl_mvm_invalidate_sta_queue(mvm, queue,
                                             disable_agg_tids, false);

        ret = iwl_mvm_disable_txq(mvm, queue,
                                  mvmsta->vif->hw_queue[txq_curr_ac],
                                  tid, 0);
        if (ret) {
                /* Re-mark the inactive queue as inactive */
                spin_lock_bh(&mvm->queue_info_lock);
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm,
                        "Failed to free inactive queue %d (ret=%d)\n",
                        queue, ret);

                return ret;
        }

        /* If TXQ is allocated to another STA, update removal in FW */
        if (!same_sta)
                iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

        return 0;
}

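/* Pick an existing DATA queue that a new TID of AC @ac can share */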
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
        int queue = 0;
        u8 ac_to_queue[IEEE80211_NUM_ACS];
        int i;

        lockdep_assert_held(&mvm->queue_info_lock);
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

        /* See what ACs the existing queues for this STA have */
        for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
                /* Only DATA queues can be shared */
                if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;

                /* Don't try and take queues being reconfigured */
                if (mvm->queue_info[i].status ==
                    IWL_MVM_QUEUE_RECONFIGURING)
                        continue;

                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }

        /*
         * The queue to share is chosen only from DATA queues as follows (in
         * descending priority):
         * 1. An AC_BE queue
         * 2. Same AC queue
         * 3. Highest AC queue that is lower than new AC
         * 4. Any existing AC (there always is at least 1 DATA queue)
         */

        /* Priority 1: An AC_BE queue */
        if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BE];
        /* Priority 2: Same AC queue */
        else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[ac];
        /* Priority 3a: If new AC is VO and VI exists - use VI */
        else if (ac == IEEE80211_AC_VO &&
                 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 3b: No BE so only AC less than the new one is BK */
        else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BK];
        /* Priority 4a: No BE nor BK - use VI if exists */
        else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 4b: No BE, BK nor VI - use VO if exists */
        else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VO];

        /* Make sure queue found (or not) is legal */
        if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
            !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
            (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
                return -ENOSPC;
        }

        /* Make sure the queue isn't in the middle of being reconfigured */
        if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
                IWL_ERR(mvm,
                        "TXQ %d is in the middle of re-config - try again\n",
                        queue);
                return -EBUSY;
        }

        return queue;
}

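/*
 * Example of the priority order in iwl_mvm_get_shared_queue(): a STA
 * whose only DATA queues serve AC_VI and AC_VO, and which needs a queue
 * for an AC_BE TID, falls through priorities 1-3 (no BE queue, no
 * same-AC queue, no BK queue) and ends up sharing the AC_VI queue
 * (priority 4a).
 */
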
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
                               int ac, int ssn, unsigned int wdg_timeout,
                               bool force)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        unsigned long mq;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        /*
         * If the AC is lower than current one - FIFO needs to be redirected to
         * the lowest one of the streams in the queue. Check if this is needed
         * here.
         * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
         * value 3 and VO with value 0, so to check if ac X is lower than ac Y
         * we need to check if the numerical value of X is LARGER than of Y.
         */
        spin_lock_bh(&mvm->queue_info_lock);
        if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
                spin_unlock_bh(&mvm->queue_info_lock);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "No redirection needed on TXQ #%d\n",
                                    queue);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        mq = mvm->hw_queue_to_mac80211[queue];
        shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
        spin_unlock_bh(&mvm->queue_info_lock);

        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);

        /* Stop MAC queues and wait for this queue to empty */
        iwl_mvm_stop_mac_queues(mvm, mq);
        ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
        if (ret) {
                IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                        queue);
                ret = -EIO;
                goto out;
        }

        /* Before redirecting the queue we need to de-activate it */
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                        ret);

        /* Make sure the SCD wrptr is correctly set before reconfiguring */
        iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

        /* Update the TID "owner" of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

        /* Redirect to lower AC */
        iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                             cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

        /* Update AC marking of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].mac80211_ac = ac;
        spin_unlock_bh(&mvm->queue_info_lock);

        /*
         * Mark queue as shared in transport if shared.
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there to shared
         * queues.
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
        /* Continue using the MAC queues */
        iwl_mvm_start_mac_queues(mvm, mq);

        return ret;
}

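/* On the TVQM (new TX API) path the firmware allocates the queue for us */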
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                        struct ieee80211_sta *sta, u8 ac,
                                        int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating queue for sta %d on tid %d\n",
                            mvmsta->sta_id, tid);
        queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
                                        wdg_timeout);
        if (queue < 0)
                return queue;

        IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        spin_unlock_bh(&mvmsta->lock);

        return 0;
}

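/*
 * Allocate a TX queue for a TID on the pre-TVQM (DQA) path. The order of
 * preference is: an MGMT-range queue for non-QoS/MGMT frames, the STA's
 * reserved queue, any free DATA queue, and finally a shared queue.
 */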
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
                .sta_id = mvmsta->sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
        };
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;
        bool using_inactive_queue = false, same_sta = false;
        unsigned long disable_agg_tids = 0;
        enum iwl_mvm_agg_state queue_state;
        bool shared_queue = false, inc_ssn;
        int ssn;
        unsigned long tfd_queue_mask;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm))
                return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

        spin_lock_bh(&mvmsta->lock);
        tfd_queue_mask = mvmsta->tfd_queue_msk;
        spin_unlock_bh(&mvmsta->lock);

        spin_lock_bh(&mvm->queue_info_lock);

        /*
         * Non-QoS, QoS NDP and MGMT frames should go to an MGMT queue, if one
         * exists
         */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control)) {
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                                            queue);

                /* If no such queue is found, we'll use a DATA queue instead */
        }

        if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
            (mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_RESERVED ||
             mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_INACTIVE)) {
                queue = mvmsta->reserved_queue;
                mvm->queue_info[queue].reserved = true;
                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
        }

        if (queue < 0)
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);

        /*
         * Check if this queue is already allocated but inactive.
         * In such a case, we'll need to first free this queue before enabling
         * it again, so we'll mark it as reserved to make sure no new traffic
         * arrives on it
         */
        if (queue > 0 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
                using_inactive_queue = true;
                same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
                                    queue, mvmsta->sta_id, tid);
        }

        /* No free queue - we'll have to share */
        if (queue <= 0) {
                queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
                if (queue > 0) {
                        shared_queue = true;
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
                }
        }

        /*
         * Mark TXQ as ready, even though it hasn't been fully configured yet,
         * to make sure no one else takes it.
         * This will allow avoiding re-acquiring the lock at the end of the
         * configuration. On error we'll mark it back as free.
         */
        if ((queue > 0) && !shared_queue)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

        spin_unlock_bh(&mvm->queue_info_lock);

        /* This shouldn't happen - out of queues */
        if (WARN_ON(queue <= 0)) {
                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                        tid, cfg.sta_id);
                return queue;
        }

        /*
         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
         * but for configuring the SCD to send A-MPDUs we need to mark the
         * queue as aggregatable.
         * Mark all DATA queues as allowing to be aggregated at some point
         */
        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

        /*
         * If this queue was previously inactive (idle) - we need to free it
         * first
         */
        if (using_inactive_queue) {
                ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
                if (ret)
                        return ret;
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating %squeue #%d to sta %d on tid %d\n",
                            shared_queue ? "shared " : "", queue,
                            mvmsta->sta_id, tid);

        if (shared_queue) {
                /* Disable any open aggs on this queue */
                disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

                if (disable_agg_tids) {
                        IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
                                            queue);
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                }
        }

        ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
        inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
                                     ssn, &cfg, wdg_timeout);
        if (inc_ssn) {
                ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
                le16_add_cpu(&hdr->seq_ctrl, 0x10);
        }

        /*
         * Mark queue as shared in transport if shared.
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there to shared
         * queues.
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

        spin_lock_bh(&mvmsta->lock);
        /*
         * This looks racy, but it is not. We have only one packet for
         * this ra/tid in our Tx path since we stop the Qdisc when we
         * need to allocate a new TFD queue.
         */
        if (inc_ssn)
                mvmsta->tid_data[tid].seq_number += 0x10;
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        mvmsta->tfd_queue_msk |= BIT(queue);
        queue_state = mvmsta->tid_data[tid].state;

        if (mvmsta->reserved_queue == queue)
                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
        spin_unlock_bh(&mvmsta->lock);

        if (!shared_queue) {
                ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
                if (ret)
                        goto out_err;

                /* If we need to re-enable aggregations... */
                if (queue_state == IWL_AGG_ON) {
                        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                        if (ret)
                                goto out_err;
                }
        } else {
                /* Redirect queue, if needed */
                ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
                                                 wdg_timeout, false);
                if (ret)
                        goto out_err;
        }

        return 0;

out_err:
        iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

        return ret;
}

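/* Re-assign the SCD TID 'owner' of @queue to any TID still mapped to it */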
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
        int tid;
        unsigned long tid_bitmap;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        spin_lock_bh(&mvm->queue_info_lock);
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                return;

        /* Find any TID for queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        cmd.tid = tid;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                        queue, ret);
                return;
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);
        IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                            queue, tid);
}

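/*
 * Unshare a queue that is left with a single TID: redirect it back to the
 * TID's own AC and, if an aggregation was open on it, re-enable the agg.
 */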
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u8 sta_id;
        int tid = -1;
        unsigned long tid_bitmap;
        unsigned int wdg_timeout;
        int ssn;
        int ret = true;

        /* queue sharing is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* Find TID for queue, and make sure it is the only one on the queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        if (tid_bitmap != BIT(tid)) {
                IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                        queue, tid_bitmap);
                return;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                            tid);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

        ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
                                         tid_to_mac80211_ac[tid], ssn,
                                         wdg_timeout, true);
        if (ret) {
                IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
                return;
        }

        /* If aggs should be turned back on - do it */
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
                struct iwl_mvm_add_sta_cmd cmd = {0};

                mvmsta->tid_disable_agg &= ~BIT(tid);

                cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
                cmd.sta_id = mvmsta->sta_id;
                cmd.add_modify = STA_MODE_MODIFY;
                cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
                cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
                cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                           iwl_mvm_add_sta_cmd_size(mvm), &cmd);
                if (!ret) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "TXQ #%d is now aggregated again\n",
                                            queue);

                        /* Mark queue internally as aggregating again */
                        iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
                }
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
        spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
        if (tid == IWL_MAX_TID_COUNT)
                return IEEE80211_AC_VO; /* MGMT */

        return tid_to_mac80211_ac[tid];
}

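/* Flush out the TX frames that were deferred while @tid had no queue yet */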
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta, int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head deferred_tx;
        u8 mac_queue;
        bool no_queue = false; /* Marks if there is a problem with the queue */
        u8 ac;

        lockdep_assert_held(&mvm->mutex);

        skb = skb_peek(&tid_data->deferred_tx_frames);
        if (!skb)
                return;
        hdr = (void *)skb->data;

        ac = iwl_mvm_tid_to_ac_queue(tid);
        mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

        if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
            iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
                IWL_ERR(mvm,
                        "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
                        mvmsta->sta_id, tid);

                /*
                 * Mark queue as problematic so later the deferred traffic is
                 * freed, as we can do nothing with it
                 */
                no_queue = true;
        }

        __skb_queue_head_init(&deferred_tx);

        /* Disable bottom-halves when entering TX path */
        local_bh_disable();
        spin_lock(&mvmsta->lock);
        skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
        mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
        spin_unlock(&mvmsta->lock);

        while ((skb = __skb_dequeue(&deferred_tx)))
                if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
                        ieee80211_free_txskb(mvm->hw, skb);
        local_bh_enable();

        /* Wake queue */
        iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

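/*
 * Worker: unshare or re-own queues that need reconfiguration, then allocate
 * queues for every station that accumulated deferred TX frames.
 */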
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
                                           add_stream_wk);
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long deferred_tid_traffic;
        int queue, sta_id, tid;

        /* Check inactivity of queues */
        iwl_mvm_inactivity_check(mvm);

        mutex_lock(&mvm->mutex);

        /* No queue reconfiguration in TVQM mode */
        if (iwl_mvm_has_new_tx_api(mvm))
                goto alloc_queues;

        /* Reconfigure queues requiring reconfiguration */
        for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
                bool reconfig;
                bool change_owner;

                spin_lock_bh(&mvm->queue_info_lock);
                reconfig = (mvm->queue_info[queue].status ==
                            IWL_MVM_QUEUE_RECONFIGURING);

                /*
                 * We need to take into account a situation in which a TXQ was
                 * allocated to TID x, and then turned shared by adding TIDs y
                 * and z. If TID x becomes inactive and is removed from the TXQ,
                 * ownership must be given to one of the remaining TIDs.
                 * This is mainly because if TID x continues - a new queue can't
                 * be allocated for it as long as it is an owner of another TXQ.
                 */
                change_owner = !(mvm->queue_info[queue].tid_bitmap &
                                 BIT(mvm->queue_info[queue].txq_tid)) &&
                               (mvm->queue_info[queue].status ==
                                IWL_MVM_QUEUE_SHARED);
                spin_unlock_bh(&mvm->queue_info_lock);

                if (reconfig)
                        iwl_mvm_unshare_queue(mvm, queue);
                else if (change_owner)
                        iwl_mvm_change_queue_owner(mvm, queue);
        }

alloc_queues:
        /* Go over all stations with deferred traffic */
        for_each_set_bit(sta_id, mvm->sta_deferred_frames,
                         IWL_MVM_STATION_COUNT) {
                clear_bit(sta_id, mvm->sta_deferred_frames);
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

                for_each_set_bit(tid, &deferred_tid_traffic,
                                 IWL_MAX_TID_COUNT + 1)
                        iwl_mvm_tx_deferred_stream(mvm, sta, tid);
        }

        mutex_unlock(&mvm->mutex);
}

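/*
 * Reserve a DATA queue for a new station: a BSS client gets the dedicated
 * IWL_MVM_DQA_BSS_CLIENT_QUEUE when it's free; otherwise any free queue in
 * the DATA range is marked as reserved for the station.
 */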
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta,
                                      enum nl80211_iftype vif_type)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        int queue;
        bool using_inactive_queue = false, same_sta = false;

        /* queue reserving is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return 0;

        /*
         * Check for inactive queues, so we don't reach a situation where we
         * can't add a STA due to a shortage in queues that doesn't really exist
         */
        iwl_mvm_inactivity_check(mvm);

        spin_lock_bh(&mvm->queue_info_lock);

        /* Make sure we have free resources for this STA */
        if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
            !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
            (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
             IWL_MVM_QUEUE_FREE))
                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
        else
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
        if (queue < 0) {
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm, "No available queues for new station\n");
                return -ENOSPC;
        } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                /*
                 * If this queue is already allocated but inactive we'll need to
                 * first free this queue before enabling it again, we'll mark
                 * it as reserved to make sure no new traffic arrives on it
                 */
                using_inactive_queue = true;
                same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
        }
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta->reserved_queue = queue;

        if (using_inactive_queue)
                iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

        IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
                            queue, mvmsta->sta_id);

        return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                                 struct iwl_mvm_sta *mvm_sta)
{
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
        int i;
        struct iwl_trans_txq_scd_cfg cfg = {
                .sta_id = mvm_sta->sta_id,
                .frame_limit = IWL_FRAME_LIMIT,
        };

        /* Make sure reserved queue is still marked as such (if allocated) */
        if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
                mvm->queue_info[mvm_sta->reserved_queue].status =
                        IWL_MVM_QUEUE_RESERVED;

        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
                int txq_id = tid_data->txq_id;
                int ac;
                u8 mac_queue;

                if (txq_id == IWL_MVM_INVALID_QUEUE)
                        continue;

                skb_queue_head_init(&tid_data->deferred_tx_frames);

                ac = tid_to_mac80211_ac[i];
                mac_queue = mvm_sta->vif->hw_queue[ac];

                if (iwl_mvm_has_new_tx_api(mvm)) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Re-mapping sta %d tid %d\n",
                                            mvm_sta->sta_id, i);
                        txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
                                                         mvm_sta->sta_id,
                                                         i, wdg_timeout);
                        tid_data->txq_id = txq_id;

                        /*
                         * Since we don't set the seq number after reset, and
                         * the HW sets it now, a FW reset makes the seq number
                         * start at 0 again; update the driver's copy as well
                         * so it stays in sync with the real value.
                         */
                        tid_data->seq_number = 0;
                } else {
                        u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

                        cfg.tid = i;
                        cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
                        cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                                         txq_id ==
                                         IWL_MVM_DQA_BSS_CLIENT_QUEUE);

                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Re-mapping sta %d tid %d to queue %d\n",
                                            mvm_sta->sta_id, i, txq_id);

                        iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
                                           wdg_timeout);
                        mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
                }
        }
}

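/* Send ADD_STA for a firmware-internal station that has no mac80211 peer */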
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
                                      struct iwl_mvm_int_sta *sta,
                                      const u8 *addr,
                                      u16 mac_id, u16 color)
{
        struct iwl_mvm_add_sta_cmd cmd;
        int ret;
        u32 status = ADD_STA_SUCCESS;

        lockdep_assert_held(&mvm->mutex);

        memset(&cmd, 0, sizeof(cmd));
        cmd.sta_id = sta->sta_id;
        cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
                                                             color));
        if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                cmd.station_type = sta->type;

        if (!iwl_mvm_has_new_tx_api(mvm))
                cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(0xffff);

        if (addr)
                memcpy(cmd.addr, addr, ETH_ALEN);

        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "Internal station added.\n");
                return 0;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
                        status);
                break;
        }
        return ret;
}

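/*
 * Add a mac80211 station to the firmware. On a HW restart, the station's
 * existing queues are re-allocated first so shared-queue state survives
 * the reset.
 */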
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_rxq_dup_data *dup_data;
        int i, ret, sta_id;
        bool sta_update = false;
        unsigned int sta_flags = 0;

        lockdep_assert_held(&mvm->mutex);

        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                sta_id = iwl_mvm_find_free_sta_id(mvm,
                                                  ieee80211_vif_type_p2p(vif));
        else
                sta_id = mvm_sta->sta_id;

        if (sta_id == IWL_MVM_INVALID_STA)
                return -ENOSPC;

        spin_lock_init(&mvm_sta->lock);

        /* if this is a HW restart re-alloc existing queues */
        if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                struct iwl_mvm_int_sta tmp_sta = {
                        .sta_id = sta_id,
                        .type = mvm_sta->sta_type,
                };

                /*
                 * First add an empty station since allocating
                 * a queue requires a valid station
                 */
                ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
                                                 mvmvif->id, mvmvif->color);
                if (ret)
                        goto err;

                iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
                sta_update = true;
                sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
                goto update_fw;
        }

        mvm_sta->sta_id = sta_id;
        mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                      mvmvif->color);
        mvm_sta->vif = vif;
        if (!mvm->trans->cfg->gen2)
                mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
        else
                mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
        mvm_sta->tx_protection = 0;
        mvm_sta->tt_tx_protection = false;
        mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

        /* HW restart, don't assume the memory has been zeroed */
        mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
        mvm_sta->tfd_queue_msk = 0;

        /* for HW restart - reset everything but the sequence number */
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                u16 seq = mvm_sta->tid_data[i].seq_number;
                memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
                mvm_sta->tid_data[i].seq_number = seq;

                /*
                 * Mark all queues for this STA as unallocated and defer TX
                 * frames until the queue is allocated
                 */
                mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
                skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
        }
        mvm_sta->deferred_traffic_tid_map = 0;
        mvm_sta->agg_tids = 0;

        if (iwl_mvm_has_new_rx_api(mvm) &&
            !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                int q;

                dup_data = kcalloc(mvm->trans->num_rx_queues,
                                   sizeof(*dup_data), GFP_KERNEL);
                if (!dup_data)
                        return -ENOMEM;
                /*
                 * Initialize all the last_seq values to 0xffff which can never
                 * compare equal to the frame's seq_ctrl in the check in
                 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
                 * number and fragmented packets don't reach that function.
                 *
                 * This thus allows receiving a packet with seqno 0 and the
                 * retry bit set as the very first packet on a new TID.
                 */
                for (q = 0; q < mvm->trans->num_rx_queues; q++)
                        memset(dup_data[q].last_seq, 0xff,
                               sizeof(dup_data[q].last_seq));
                mvm_sta->dup_data = dup_data;
1430 }
1431
Johannes Bergc8f54702017-06-19 23:50:31 +02001432 if (!iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufmand5216a22015-08-09 15:50:51 +03001433 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1434 ieee80211_vif_type_p2p(vif));
Liad Kaufman24afba72015-07-28 18:56:08 +03001435 if (ret)
1436 goto err;
1437 }
1438
Gregory Greenman9f66a392017-11-05 18:49:48 +02001439 /*
1440 * if rs is registered with mac80211, then "add station" will be handled
1441	 * via the corresponding ops, otherwise we need to notify rate scaling here
1442 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02001443 if (iwl_mvm_has_tlc_offload(mvm))
Gregory Greenman9f66a392017-11-05 18:49:48 +02001444 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1445
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001446update_fw:
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001447 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001448 if (ret)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001449 goto err;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001450
Johannes Berg9e848012014-08-04 14:33:42 +02001451 if (vif->type == NL80211_IFTYPE_STATION) {
1452 if (!sta->tdls) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001453 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001454 mvmvif->ap_sta_id = sta_id;
1455 } else {
Sara Sharon0ae98812017-01-04 14:53:58 +02001456 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001457 }
1458 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001459
1460 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1461
1462 return 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001463
1464err:
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001465 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001466}
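/*
 * Note on the flow above: on a normal association we pick a free sta_id,
 * initialize the per-TID state and (pre-TVQM) reserve a queue before the
 * station is sent to the FW; on a HW restart we instead re-add an empty
 * internal station first and re-map the already-allocated queues to it
 * before updating the FW with STA_MODIFY_QUEUES.
 */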
1467
1468int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1469 bool drain)
1470{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001471 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001472 int ret;
1473 u32 status;
1474
1475 lockdep_assert_held(&mvm->mutex);
1476
1477 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1478 cmd.sta_id = mvmsta->sta_id;
1479 cmd.add_modify = STA_MODE_MODIFY;
1480 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1481 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1482
1483 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001484 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1485 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001486 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001487 if (ret)
1488 return ret;
1489
Sara Sharon837c4da2016-01-07 16:50:45 +02001490 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001491 case ADD_STA_SUCCESS:
1492		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1493 mvmsta->sta_id);
1494 break;
1495 default:
1496 ret = -EIO;
1497 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1498 mvmsta->sta_id);
1499 break;
1500 }
1501
1502 return ret;
1503}
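/*
 * Illustrative use of the drain flow (a sketch based on iwl_mvm_rm_sta()
 * below, not an additional API): draining brackets the flush/wait so the
 * FW stops scheduling new frames for the station first.
 *
 *	iwl_mvm_drain_sta(mvm, mvm_sta, true);	// FW drains the station's TX
 *	iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
 *	// wait for the TX queues to empty, then:
 *	iwl_mvm_drain_sta(mvm, mvm_sta, false);	// back to normal operation
 */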
1504
1505/*
1506 * Remove a station from the FW table. Before sending the command to remove
1507 * the station, validate that the station is indeed known to the driver
1508 * (sanity check only).
1509 */
1510static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1511{
1512 struct ieee80211_sta *sta;
1513 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1514 .sta_id = sta_id,
1515 };
1516 int ret;
1517
1518 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1519 lockdep_is_held(&mvm->mutex));
1520
1521 /* Note: internal stations are marked as error values */
1522 if (!sta) {
1523 IWL_ERR(mvm, "Invalid station id\n");
1524 return -EINVAL;
1525 }
1526
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001527 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001528 sizeof(rm_sta_cmd), &rm_sta_cmd);
1529 if (ret) {
1530 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1531 return ret;
1532 }
1533
1534 return 0;
1535}
1536
Liad Kaufman24afba72015-07-28 18:56:08 +03001537static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1538 struct ieee80211_vif *vif,
1539 struct iwl_mvm_sta *mvm_sta)
1540{
1541 int ac;
1542 int i;
1543
1544 lockdep_assert_held(&mvm->mutex);
1545
1546 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
Sara Sharon6862fce2017-02-22 19:34:17 +02001547 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman24afba72015-07-28 18:56:08 +03001548 continue;
1549
1550 ac = iwl_mvm_tid_to_ac_queue(i);
1551 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1552 vif->hw_queue[ac], i, 0);
Sara Sharon6862fce2017-02-22 19:34:17 +02001553 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001554 }
1555}
1556
Sara Sharond6d517b2017-03-06 10:16:11 +02001557int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1558 struct iwl_mvm_sta *mvm_sta)
1559{
Sharon Dvirbec95222017-06-12 11:40:33 +03001560 int i;
Sara Sharond6d517b2017-03-06 10:16:11 +02001561
1562 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1563 u16 txq_id;
Sharon Dvirbec95222017-06-12 11:40:33 +03001564 int ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001565
1566 spin_lock_bh(&mvm_sta->lock);
1567 txq_id = mvm_sta->tid_data[i].txq_id;
1568 spin_unlock_bh(&mvm_sta->lock);
1569
1570 if (txq_id == IWL_MVM_INVALID_QUEUE)
1571 continue;
1572
1573 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1574 if (ret)
Sharon Dvirbec95222017-06-12 11:40:33 +03001575 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001576 }
1577
Sharon Dvirbec95222017-06-12 11:40:33 +03001578 return 0;
Sara Sharond6d517b2017-03-06 10:16:11 +02001579}
1580
Johannes Berg8ca151b2013-01-24 14:25:36 +01001581int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1582 struct ieee80211_vif *vif,
1583 struct ieee80211_sta *sta)
1584{
1585 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001586 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001587 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001588 int ret;
1589
1590 lockdep_assert_held(&mvm->mutex);
1591
Sara Sharona571f5f2015-12-07 12:50:58 +02001592 if (iwl_mvm_has_new_rx_api(mvm))
1593 kfree(mvm_sta->dup_data);
1594
Johannes Bergc8f54702017-06-19 23:50:31 +02001595 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1596 if (ret)
1597 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001598
Johannes Bergc8f54702017-06-19 23:50:31 +02001599 /* flush its queues here since we are freeing mvm_sta */
1600 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1601 if (ret)
1602 return ret;
1603 if (iwl_mvm_has_new_tx_api(mvm)) {
1604 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1605 } else {
1606 u32 q_mask = mvm_sta->tfd_queue_msk;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001607
Johannes Bergc8f54702017-06-19 23:50:31 +02001608 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1609 q_mask);
1610 }
1611 if (ret)
1612 return ret;
Liad Kaufman56214742016-09-22 15:14:08 +03001613
Johannes Bergc8f54702017-06-19 23:50:31 +02001614 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001615
Johannes Bergc8f54702017-06-19 23:50:31 +02001616 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001617
Johannes Bergc8f54702017-06-19 23:50:31 +02001618 /* If there is a TXQ still marked as reserved - free it */
1619 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1620 u8 reserved_txq = mvm_sta->reserved_queue;
1621 enum iwl_mvm_queue_status *status;
1622
1623 /*
1624 * If no traffic has gone through the reserved TXQ - it
1625 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1626 * should be manually marked as free again
1627 */
1628 spin_lock_bh(&mvm->queue_info_lock);
1629 status = &mvm->queue_info[reserved_txq].status;
1630 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1631 (*status != IWL_MVM_QUEUE_FREE),
1632 "sta_id %d reserved txq %d status %d",
1633 sta_id, reserved_txq, *status)) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001634 spin_unlock_bh(&mvm->queue_info_lock);
Johannes Bergc8f54702017-06-19 23:50:31 +02001635 return -EINVAL;
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001636 }
1637
Johannes Bergc8f54702017-06-19 23:50:31 +02001638 *status = IWL_MVM_QUEUE_FREE;
1639 spin_unlock_bh(&mvm->queue_info_lock);
1640 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001641
Johannes Bergc8f54702017-06-19 23:50:31 +02001642 if (vif->type == NL80211_IFTYPE_STATION &&
1643 mvmvif->ap_sta_id == sta_id) {
1644 /* if associated - we can't remove the AP STA now */
1645 if (vif->bss_conf.assoc)
1646 return ret;
Eliad Peller37577fe2013-12-05 17:19:39 +02001647
Johannes Bergc8f54702017-06-19 23:50:31 +02001648 /* unassoc - go ahead - remove the AP STA now */
1649 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1650
1651 /* clear d0i3_ap_sta_id if no longer relevant */
1652 if (mvm->d0i3_ap_sta_id == sta_id)
1653 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001654 }
1655
1656 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001657 * This shouldn't happen - the TDLS channel switch should be canceled
1658 * before the STA is removed.
1659 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001660 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001661 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001662 cancel_delayed_work(&mvm->tdls_cs.dwork);
1663 }
1664
1665 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001666 * Make sure that the tx response code sees the station as -EBUSY and
1667 * calls the drain worker.
1668 */
1669 spin_lock_bh(&mvm_sta->lock);
Johannes Bergc8f54702017-06-19 23:50:31 +02001670 spin_unlock_bh(&mvm_sta->lock);
Sara Sharon94c3e612016-12-07 15:04:37 +02001671
Johannes Bergc8f54702017-06-19 23:50:31 +02001672 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1673 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001674
1675 return ret;
1676}
1677
1678int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1679 struct ieee80211_vif *vif,
1680 u8 sta_id)
1681{
1682 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1683
1684 lockdep_assert_held(&mvm->mutex);
1685
Monam Agarwalc531c772014-03-24 00:05:56 +05301686 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001687 return ret;
1688}
1689
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001690int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1691 struct iwl_mvm_int_sta *sta,
Sara Sharonced19f22017-02-06 19:09:32 +02001692 u32 qmask, enum nl80211_iftype iftype,
1693 enum iwl_sta_type type)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001694{
Avraham Sterndf65c8d2018-03-06 14:10:49 +02001695 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1696 sta->sta_id == IWL_MVM_INVALID_STA) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02001697 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Sara Sharon0ae98812017-01-04 14:53:58 +02001698 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001699 return -ENOSPC;
1700 }
1701
1702 sta->tfd_queue_msk = qmask;
Sara Sharonced19f22017-02-06 19:09:32 +02001703 sta->type = type;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001704
1705 /* put a non-NULL value so iterating over the stations won't stop */
1706 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1707 return 0;
1708}
1709
Sara Sharon26d6c162017-01-03 12:00:19 +02001710void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001711{
Monam Agarwalc531c772014-03-24 00:05:56 +05301712 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001713 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
Sara Sharon0ae98812017-01-04 14:53:58 +02001714 sta->sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001715}
1716
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001717static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
1718 u8 sta_id, u8 fifo)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001719{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001720 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1721 mvm->cfg->base_params->wd_timeout :
1722 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001723
Sara Sharon310181e2017-01-17 14:27:48 +02001724 if (iwl_mvm_has_new_tx_api(mvm)) {
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001725 int tvqm_queue =
1726 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
1727 IWL_MAX_TID_COUNT,
1728 wdg_timeout);
1729 *queue = tvqm_queue;
Johannes Bergc8f54702017-06-19 23:50:31 +02001730 } else {
Liad Kaufman28d07932015-09-01 16:36:25 +03001731 struct iwl_trans_txq_scd_cfg cfg = {
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001732 .fifo = fifo,
1733 .sta_id = sta_id,
Liad Kaufman28d07932015-09-01 16:36:25 +03001734 .tid = IWL_MAX_TID_COUNT,
1735 .aggregate = false,
1736 .frame_limit = IWL_FRAME_LIMIT,
1737 };
1738
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001739 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
Liad Kaufman28d07932015-09-01 16:36:25 +03001740 }
Sara Sharonc5a719e2016-11-15 10:20:48 +02001741}
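/*
 * Ordering note for the aux/sniffer helpers below: with the old TX API the
 * queue must be mapped to its fifo before ADD_STA is sent, while on TVQM
 * (22000 and on) a queue can only be allocated after the station exists in
 * the FW - hence the two call sites around iwl_mvm_add_int_sta_common().
 */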
1742
1743int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1744{
1745 int ret;
1746
1747 lockdep_assert_held(&mvm->mutex);
1748
1749 /* Allocate aux station and assign to it the aux queue */
1750 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Sara Sharonced19f22017-02-06 19:09:32 +02001751 NL80211_IFTYPE_UNSPECIFIED,
1752 IWL_STA_AUX_ACTIVITY);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001753 if (ret)
1754 return ret;
1755
1756 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1757 if (!iwl_mvm_has_new_tx_api(mvm))
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001758 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1759 mvm->aux_sta.sta_id,
1760 IWL_MVM_TX_FIFO_MCAST);
Liad Kaufman28d07932015-09-01 16:36:25 +03001761
Johannes Berg8ca151b2013-01-24 14:25:36 +01001762 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1763 MAC_INDEX_AUX, 0);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001764 if (ret) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001765 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001766 return ret;
1767 }
1768
1769 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02001770	 * For 22000 firmware and on we cannot add a queue to a station unknown
Sara Sharonc5a719e2016-11-15 10:20:48 +02001771	 * to the firmware, so enable the queue here - after the station was added
1772 */
1773 if (iwl_mvm_has_new_tx_api(mvm))
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001774 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1775 mvm->aux_sta.sta_id,
1776 IWL_MVM_TX_FIFO_MCAST);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001777
1778 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001779}
1780
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001781int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1782{
1783 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001784 int ret;
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001785
1786 lockdep_assert_held(&mvm->mutex);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001787
1788 /* Map snif queue to fifo - must happen before adding snif station */
1789 if (!iwl_mvm_has_new_tx_api(mvm))
1790 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1791 mvm->snif_sta.sta_id,
1792 IWL_MVM_TX_FIFO_BE);
1793
1794 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001795 mvmvif->id, 0);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001796 if (ret)
1797 return ret;
1798
1799 /*
1800	 * For 22000 firmware and on we cannot add a queue to a station unknown
1801	 * to the firmware, so enable the queue here - after the station was added
1802 */
1803 if (iwl_mvm_has_new_tx_api(mvm))
1804 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1805 mvm->snif_sta.sta_id,
1806 IWL_MVM_TX_FIFO_BE);
1807
1808 return 0;
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001809}
1810
1811int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1812{
1813 int ret;
1814
1815 lockdep_assert_held(&mvm->mutex);
1816
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02001817 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
1818 IWL_MAX_TID_COUNT, 0);
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001819 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1820 if (ret)
1821 IWL_WARN(mvm, "Failed sending remove station\n");
1822
1823 return ret;
1824}
1825
1826void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1827{
1828 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1829}
1830
Johannes Berg712b24a2014-08-04 14:14:14 +02001831void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1832{
1833 lockdep_assert_held(&mvm->mutex);
1834
1835 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1836}
1837
Johannes Berg8ca151b2013-01-24 14:25:36 +01001838/*
1839 * Send the add station command for the vif's broadcast station.
1840 * Assumes that the station was already allocated.
1841 *
1842 * @mvm: the mvm component
1843 * @vif: the interface to which the broadcast station is added
1844 */
Johannes Berg013290a2014-08-04 13:38:48 +02001846int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001847{
1848 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001849 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001850 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001851 const u8 *baddr = _baddr;
Johannes Berg7daa7622017-02-24 12:02:22 +01001852 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001853 int ret;
Sara Sharonc5a719e2016-11-15 10:20:48 +02001854 unsigned int wdg_timeout =
1855 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1856 struct iwl_trans_txq_scd_cfg cfg = {
1857 .fifo = IWL_MVM_TX_FIFO_VO,
1858 .sta_id = mvmvif->bcast_sta.sta_id,
1859 .tid = IWL_MAX_TID_COUNT,
1860 .aggregate = false,
1861 .frame_limit = IWL_FRAME_LIMIT,
1862 };
Johannes Berg8ca151b2013-01-24 14:25:36 +01001863
1864 lockdep_assert_held(&mvm->mutex);
1865
Johannes Bergc8f54702017-06-19 23:50:31 +02001866 if (!iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufman4d339982017-03-21 17:13:16 +02001867 if (vif->type == NL80211_IFTYPE_AP ||
1868 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon49f71712017-01-09 12:07:16 +02001869 queue = mvm->probe_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001870 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
Sara Sharon49f71712017-01-09 12:07:16 +02001871 queue = mvm->p2p_dev_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001872 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
Liad Kaufmande24f632015-08-04 15:19:18 +03001873 return -EINVAL;
1874
Liad Kaufmandf88c082016-11-24 15:31:00 +02001875 bsta->tfd_queue_msk |= BIT(queue);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001876
Sara Sharon310181e2017-01-17 14:27:48 +02001877 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
1878 &cfg, wdg_timeout);
Liad Kaufmande24f632015-08-04 15:19:18 +03001879 }
1880
Johannes Berg5023d962013-07-31 14:07:43 +02001881 if (vif->type == NL80211_IFTYPE_ADHOC)
1882 baddr = vif->bss_conf.bssid;
1883
Sara Sharon0ae98812017-01-04 14:53:58 +02001884 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001885 return -ENOSPC;
1886
Liad Kaufmandf88c082016-11-24 15:31:00 +02001887 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1888 mvmvif->id, mvmvif->color);
1889 if (ret)
1890 return ret;
1891
1892 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02001893	 * For 22000 firmware and on we cannot add a queue to a station unknown
Sara Sharonc5a719e2016-11-15 10:20:48 +02001894	 * to the firmware, so enable the queue here - after the station was added
Liad Kaufmandf88c082016-11-24 15:31:00 +02001895 */
Sara Sharon310181e2017-01-17 14:27:48 +02001896 if (iwl_mvm_has_new_tx_api(mvm)) {
Johannes Berg7daa7622017-02-24 12:02:22 +01001897 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
1898 bsta->sta_id,
1899 IWL_MAX_TID_COUNT,
1900 wdg_timeout);
1901
Luca Coelho7b758a12017-06-20 13:40:03 +03001902 if (vif->type == NL80211_IFTYPE_AP ||
1903 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon310181e2017-01-17 14:27:48 +02001904 mvm->probe_queue = queue;
1905 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1906 mvm->p2p_dev_queue = queue;
Sara Sharon310181e2017-01-17 14:27:48 +02001907 }
Liad Kaufmandf88c082016-11-24 15:31:00 +02001908
1909 return 0;
1910}
1911
1912static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1913 struct ieee80211_vif *vif)
1914{
1915 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001916 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001917
1918 lockdep_assert_held(&mvm->mutex);
1919
Sara Sharond49394a2017-03-05 13:01:08 +02001920 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
1921
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001922 switch (vif->type) {
1923 case NL80211_IFTYPE_AP:
1924 case NL80211_IFTYPE_ADHOC:
1925 queue = mvm->probe_queue;
1926 break;
1927 case NL80211_IFTYPE_P2P_DEVICE:
1928 queue = mvm->p2p_dev_queue;
1929 break;
1930 default:
1931 WARN(1, "Can't free bcast queue on vif type %d\n",
1932 vif->type);
1933 return;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001934 }
1935
Mordechai Goodsteind167e812017-05-10 16:42:53 +03001936 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
1937 if (iwl_mvm_has_new_tx_api(mvm))
1938 return;
1939
1940 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
1941 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001942}
1943
1944/* Send the FW a request to remove the station from its internal data
1945 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02001946int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001947{
Johannes Berg013290a2014-08-04 13:38:48 +02001948 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001949 int ret;
1950
1951 lockdep_assert_held(&mvm->mutex);
1952
Johannes Bergc8f54702017-06-19 23:50:31 +02001953 iwl_mvm_free_bcast_sta_queues(mvm, vif);
Liad Kaufmandf88c082016-11-24 15:31:00 +02001954
Johannes Berg013290a2014-08-04 13:38:48 +02001955 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001956 if (ret)
1957 IWL_WARN(mvm, "Failed sending remove station\n");
1958 return ret;
1959}
1960
Johannes Berg013290a2014-08-04 13:38:48 +02001961int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1962{
1963 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001964
1965 lockdep_assert_held(&mvm->mutex);
1966
Johannes Bergc8f54702017-06-19 23:50:31 +02001967 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
Sara Sharonced19f22017-02-06 19:09:32 +02001968 ieee80211_vif_type_p2p(vif),
1969 IWL_STA_GENERAL_PURPOSE);
Johannes Berg013290a2014-08-04 13:38:48 +02001970}
1971
Johannes Berg8ca151b2013-01-24 14:25:36 +01001972/* Allocate a new station entry for the broadcast station to the given vif,
1973 * and send it to the FW.
1974 * Note that each P2P mac should have its own broadcast station.
1975 *
1976 * @mvm: the mvm component
1977 * @vif: the interface to which the broadcast station is added
1978 */
Luca Coelhod1973582017-06-22 16:00:25 +03001979int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001980{
1981 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001982 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001983 int ret;
1984
1985 lockdep_assert_held(&mvm->mutex);
1986
Johannes Berg013290a2014-08-04 13:38:48 +02001987 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001988 if (ret)
1989 return ret;
1990
Johannes Berg013290a2014-08-04 13:38:48 +02001991 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001992
1993 if (ret)
1994 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02001995
Johannes Berg8ca151b2013-01-24 14:25:36 +01001996 return ret;
1997}
1998
Johannes Berg013290a2014-08-04 13:38:48 +02001999void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2000{
2001 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2002
2003 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2004}
2005
Johannes Berg8ca151b2013-01-24 14:25:36 +01002006/*
2007 * Send the FW a request to remove the station from its internal data
2008 * structures, and in addition remove it from the local data structure.
2009 */
Luca Coelhod1973582017-06-22 16:00:25 +03002010int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002011{
2012 int ret;
2013
2014 lockdep_assert_held(&mvm->mutex);
2015
Johannes Berg013290a2014-08-04 13:38:48 +02002016 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002017
Johannes Berg013290a2014-08-04 13:38:48 +02002018 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2019
Johannes Berg8ca151b2013-01-24 14:25:36 +01002020 return ret;
2021}
2022
Sara Sharon26d6c162017-01-03 12:00:19 +02002023/*
2024 * Allocate a new station entry for the multicast station to the given vif,
2025 * and send it to the FW.
2026 * Note that each AP/GO mac should have its own multicast station.
2027 *
2028 * @mvm: the mvm component
2029 * @vif: the interface to which the multicast station is added
2030 */
2031int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2032{
2033 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2034 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2035 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2036 const u8 *maddr = _maddr;
2037 struct iwl_trans_txq_scd_cfg cfg = {
2038 .fifo = IWL_MVM_TX_FIFO_MCAST,
2039 .sta_id = msta->sta_id,
Ilan Peer6508de02018-01-25 15:22:41 +02002040 .tid = 0,
Sara Sharon26d6c162017-01-03 12:00:19 +02002041 .aggregate = false,
2042 .frame_limit = IWL_FRAME_LIMIT,
2043 };
2044 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2045 int ret;
2046
2047 lockdep_assert_held(&mvm->mutex);
2048
Liad Kaufmanee48b722017-03-21 17:13:16 +02002049 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2050 vif->type != NL80211_IFTYPE_ADHOC))
Sara Sharon26d6c162017-01-03 12:00:19 +02002051 return -ENOTSUPP;
2052
Sara Sharonced19f22017-02-06 19:09:32 +02002053 /*
Sara Sharonfc07bd82017-12-21 15:05:28 +02002054 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2055 * invalid, so make sure we use the queue we want.
2056 * Note that this is done here as we want to avoid making DQA
2057	 * changes in the mac80211 layer.
2058 */
2059 if (vif->type == NL80211_IFTYPE_ADHOC) {
2060 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2061 mvmvif->cab_queue = vif->cab_queue;
2062 }
2063
2064 /*
Sara Sharonced19f22017-02-06 19:09:32 +02002065 * While in previous FWs we had to exclude cab queue from TFD queue
2066	 * mask, now it is needed like any other queue.
2067 */
2068 if (!iwl_mvm_has_new_tx_api(mvm) &&
2069 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2070 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2071 &cfg, timeout);
2072 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2073 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002074 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2075 mvmvif->id, mvmvif->color);
2076 if (ret) {
2077 iwl_mvm_dealloc_int_sta(mvm, msta);
2078 return ret;
2079 }
2080
2081 /*
2082 * Enable cab queue after the ADD_STA command is sent.
Luca Coelho2f7a3862017-11-15 15:07:34 +02002083	 * This is needed for 22000 firmware, which won't accept a SCD_QUEUE_CFG
Sara Sharonced19f22017-02-06 19:09:32 +02002084	 * command with an unknown station id, and for FW that doesn't support
2085	 * the station API, since the cab queue is not included in the
2086 * tfd_queue_mask.
Sara Sharon26d6c162017-01-03 12:00:19 +02002087 */
Sara Sharon310181e2017-01-17 14:27:48 +02002088 if (iwl_mvm_has_new_tx_api(mvm)) {
2089 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2090 msta->sta_id,
Ilan Peer6508de02018-01-25 15:22:41 +02002091 0,
Sara Sharon310181e2017-01-17 14:27:48 +02002092 timeout);
Sara Sharone2af3fa2017-02-22 19:35:10 +02002093 mvmvif->cab_queue = queue;
Sara Sharonced19f22017-02-06 19:09:32 +02002094 } else if (!fw_has_api(&mvm->fw->ucode_capa,
Sara Sharonfc07bd82017-12-21 15:05:28 +02002095 IWL_UCODE_TLV_API_STA_TYPE))
Sara Sharon310181e2017-01-17 14:27:48 +02002096 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2097 &cfg, timeout);
Sara Sharon26d6c162017-01-03 12:00:19 +02002098
2099 return 0;
2100}
2101
2102/*
2103 * Send the FW a request to remove the station from its internal data
2104 * structures, and in addition remove it from the local data structure.
2105 */
2106int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2107{
2108 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2109 int ret;
2110
2111 lockdep_assert_held(&mvm->mutex);
2112
Sara Sharond49394a2017-03-05 13:01:08 +02002113 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2114
Sara Sharone2af3fa2017-02-22 19:35:10 +02002115 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
Ilan Peer6508de02018-01-25 15:22:41 +02002116 0, 0);
Sara Sharon26d6c162017-01-03 12:00:19 +02002117
2118 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2119 if (ret)
2120 IWL_WARN(mvm, "Failed sending remove station\n");
2121
2122 return ret;
2123}
2124
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002125#define IWL_MAX_RX_BA_SESSIONS 16
2126
Sara Sharonb915c102016-03-23 16:32:02 +02002127static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02002128{
Sara Sharonb915c102016-03-23 16:32:02 +02002129 struct iwl_mvm_delba_notif notif = {
2130 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2131 .metadata.sync = 1,
2132 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02002133 };
Sara Sharonb915c102016-03-23 16:32:02 +02002134 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2135};
Sara Sharon10b2b202016-03-20 16:23:41 +02002136
Sara Sharonb915c102016-03-23 16:32:02 +02002137static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2138 struct iwl_mvm_baid_data *data)
2139{
2140 int i;
2141
2142 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2143
2144 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2145 int j;
2146 struct iwl_mvm_reorder_buffer *reorder_buf =
2147 &data->reorder_buf[i];
Johannes Bergdfdddd92017-09-26 12:24:51 +02002148 struct iwl_mvm_reorder_buf_entry *entries =
2149 &data->entries[i * data->entries_per_queue];
Sara Sharonb915c102016-03-23 16:32:02 +02002150
Sara Sharon06904052016-02-28 20:28:17 +02002151 spin_lock_bh(&reorder_buf->lock);
2152 if (likely(!reorder_buf->num_stored)) {
2153 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02002154 continue;
Sara Sharon06904052016-02-28 20:28:17 +02002155 }
Sara Sharonb915c102016-03-23 16:32:02 +02002156
2157 /*
2158 * This shouldn't happen in regular DELBA since the internal
2159 * delBA notification should trigger a release of all frames in
2160 * the reorder buffer.
2161 */
2162 WARN_ON(1);
2163
2164 for (j = 0; j < reorder_buf->buf_size; j++)
Johannes Bergdfdddd92017-09-26 12:24:51 +02002165 __skb_queue_purge(&entries[j].e.frames);
Sara Sharon06904052016-02-28 20:28:17 +02002166 /*
2167		 * Prevent timer re-arm. This prevents a very far-fetched case
2168		 * where we timed out on the notification. There may be prior
2169		 * RX frames pending in the RX queue before the notification
2170		 * that might get processed between now and the actual deletion,
2171		 * and we would re-arm the timer even though we are deleting the
2172 * reorder buffer.
2173 */
2174 reorder_buf->removed = true;
2175 spin_unlock_bh(&reorder_buf->lock);
2176 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02002177 }
2178}
2179
2180static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
Sara Sharonb915c102016-03-23 16:32:02 +02002181 struct iwl_mvm_baid_data *data,
Luca Coelho514c30692018-06-24 11:59:54 +03002182 u16 ssn, u16 buf_size)
Sara Sharonb915c102016-03-23 16:32:02 +02002183{
2184 int i;
2185
2186 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2187 struct iwl_mvm_reorder_buffer *reorder_buf =
2188 &data->reorder_buf[i];
Johannes Bergdfdddd92017-09-26 12:24:51 +02002189 struct iwl_mvm_reorder_buf_entry *entries =
2190 &data->entries[i * data->entries_per_queue];
Sara Sharonb915c102016-03-23 16:32:02 +02002191 int j;
2192
2193 reorder_buf->num_stored = 0;
2194 reorder_buf->head_sn = ssn;
2195 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02002196 /* rx reorder timer */
Kees Cook8cef5342017-10-24 02:29:37 -07002197 timer_setup(&reorder_buf->reorder_timer,
2198 iwl_mvm_reorder_timer_expired, 0);
Sara Sharon06904052016-02-28 20:28:17 +02002199 spin_lock_init(&reorder_buf->lock);
2200 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02002201 reorder_buf->queue = i;
Sara Sharon5d43eab2017-02-02 12:51:39 +02002202 reorder_buf->valid = false;
Sara Sharonb915c102016-03-23 16:32:02 +02002203 for (j = 0; j < reorder_buf->buf_size; j++)
Johannes Bergdfdddd92017-09-26 12:24:51 +02002204 __skb_queue_head_init(&entries[j].e.frames);
Sara Sharonb915c102016-03-23 16:32:02 +02002205 }
Sara Sharon10b2b202016-03-20 16:23:41 +02002206}
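/*
 * Memory layout used by the two helpers above (derived from the allocation
 * in iwl_mvm_sta_rx_agg() below): one iwl_mvm_baid_data is followed by a
 * per-RX-queue array of reorder entries, with each queue's slice padded up
 * to a cache line to avoid false sharing:
 *
 *	baid_data
 *	  entries[0 * entries_per_queue]	<- RX queue 0
 *	  entries[1 * entries_per_queue]	<- RX queue 1
 *	  ...
 */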
2207
Johannes Berg8ca151b2013-01-24 14:25:36 +01002208int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Luca Coelho514c30692018-06-24 11:59:54 +03002209 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002210{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002211 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002212 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002213 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002214 int ret;
2215 u32 status;
2216
2217 lockdep_assert_held(&mvm->mutex);
2218
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002219 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2220 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2221 return -ENOSPC;
2222 }
2223
Sara Sharon10b2b202016-03-20 16:23:41 +02002224 if (iwl_mvm_has_new_rx_api(mvm) && start) {
Johannes Bergdfdddd92017-09-26 12:24:51 +02002225 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2226
2227 /* sparse doesn't like the __align() so don't check */
2228#ifndef __CHECKER__
2229 /*
2230 * The division below will be OK if either the cache line size
2231		 * can be divided by the entry size (ALIGN will round up) or
2232		 * if the entry size can be divided by the cache line size, in
2233 * which case the ALIGN() will do nothing.
2234 */
2235 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2236 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2237#endif
2238
2239 /*
2240 * Upward align the reorder buffer size to fill an entire cache
2241 * line for each queue, to avoid sharing cache lines between
2242 * different queues.
2243 */
2244 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2245
Sara Sharon10b2b202016-03-20 16:23:41 +02002246 /*
2247 * Allocate here so if allocation fails we can bail out early
2248 * before starting the BA session in the firmware
2249 */
Sara Sharonb915c102016-03-23 16:32:02 +02002250 baid_data = kzalloc(sizeof(*baid_data) +
2251 mvm->trans->num_rx_queues *
Johannes Bergdfdddd92017-09-26 12:24:51 +02002252 reorder_buf_size,
Sara Sharonb915c102016-03-23 16:32:02 +02002253 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002254 if (!baid_data)
2255 return -ENOMEM;
Johannes Bergdfdddd92017-09-26 12:24:51 +02002256
2257 /*
2258 * This division is why we need the above BUILD_BUG_ON(),
2259 * if that doesn't hold then this will not be right.
2260 */
2261 baid_data->entries_per_queue =
2262 reorder_buf_size / sizeof(baid_data->entries[0]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002263 }
2264
Johannes Berg8ca151b2013-01-24 14:25:36 +01002265 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2266 cmd.sta_id = mvm_sta->sta_id;
2267 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002268 if (start) {
2269 cmd.add_immediate_ba_tid = (u8) tid;
2270 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Luca Coelho514c30692018-06-24 11:59:54 +03002271 cmd.rx_ba_window = cpu_to_le16(buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002272 } else {
2273 cmd.remove_immediate_ba_tid = (u8) tid;
2274 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002275 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2276 STA_MODIFY_REMOVE_BA_TID;
2277
2278 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002279 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2280 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002281 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002282 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002283 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002284
Sara Sharon837c4da2016-01-07 16:50:45 +02002285 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002286 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002287 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2288 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002289 break;
2290 case ADD_STA_IMMEDIATE_BA_FAILURE:
2291 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2292 ret = -ENOSPC;
2293 break;
2294 default:
2295 ret = -EIO;
2296 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2297 start ? "start" : "stopp", status);
2298 break;
2299 }
2300
Sara Sharon10b2b202016-03-20 16:23:41 +02002301 if (ret)
2302 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002303
Sara Sharon10b2b202016-03-20 16:23:41 +02002304 if (start) {
2305 u8 baid;
2306
2307 mvm->rx_ba_sessions++;
2308
2309 if (!iwl_mvm_has_new_rx_api(mvm))
2310 return 0;
2311
2312 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2313 ret = -EINVAL;
2314 goto out_free;
2315 }
2316 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2317 IWL_ADD_STA_BAID_SHIFT);
2318 baid_data->baid = baid;
2319 baid_data->timeout = timeout;
2320 baid_data->last_rx = jiffies;
Kees Cook8cef5342017-10-24 02:29:37 -07002321 baid_data->rcu_ptr = &mvm->baid_map[baid];
2322 timer_setup(&baid_data->session_timer,
2323 iwl_mvm_rx_agg_session_expired, 0);
Sara Sharon10b2b202016-03-20 16:23:41 +02002324 baid_data->mvm = mvm;
2325 baid_data->tid = tid;
2326 baid_data->sta_id = mvm_sta->sta_id;
2327
2328 mvm_sta->tid_to_baid[tid] = baid;
2329 if (timeout)
2330 mod_timer(&baid_data->session_timer,
2331 TU_TO_EXP_TIME(timeout * 2));
2332
Sara Sharon3f1c4c52017-10-02 12:07:59 +03002333 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002334 /*
2335 * protect the BA data with RCU to cover a case where our
2336		 * internal RX sync mechanism times out (not that it's
2337 * supposed to happen) and we will free the session data while
2338 * RX is being processed in parallel
2339 */
Sara Sharon35263a02016-06-21 12:12:10 +03002340 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2341 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002342 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2343 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002344 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002345 u8 baid = mvm_sta->tid_to_baid[tid];
2346
Sara Sharon60dec522016-06-21 14:14:08 +03002347 if (mvm->rx_ba_sessions > 0)
2348 /* check that restart flow didn't zero the counter */
2349 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002350 if (!iwl_mvm_has_new_rx_api(mvm))
2351 return 0;
2352
2353 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2354 return -EINVAL;
2355
2356 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2357 if (WARN_ON(!baid_data))
2358 return -EINVAL;
2359
2360 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002361 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002362 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002363 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2364 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002365 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002366 }
2367 return 0;
2368
2369out_free:
2370 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002371 return ret;
2372}
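/*
 * A minimal usage sketch for the function above, assuming the usual
 * mac80211 ampdu_action flow (IEEE80211_AMPDU_RX_START /
 * IEEE80211_AMPDU_RX_STOP) drives it:
 *
 *	// on ADDBA request from the peer:
 *	ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, ssn, true, buf_size, timeout);
 *	// on DELBA / session teardown:
 *	ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, 0, 0);
 */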
2373
Liad Kaufman9794c642015-08-19 17:34:28 +03002374int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2375 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002376{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002377 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002378 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002379 int ret;
2380 u32 status;
2381
2382 lockdep_assert_held(&mvm->mutex);
2383
2384 if (start) {
2385 mvm_sta->tfd_queue_msk |= BIT(queue);
2386 mvm_sta->tid_disable_agg &= ~BIT(tid);
2387 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002388 /* In DQA-mode the queue isn't removed on agg termination */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002389 mvm_sta->tid_disable_agg |= BIT(tid);
2390 }
2391
2392 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2393 cmd.sta_id = mvm_sta->sta_id;
2394 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002395 if (!iwl_mvm_has_new_tx_api(mvm))
2396 cmd.modify_mask = STA_MODIFY_QUEUES;
2397 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002398 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2399 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2400
2401 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002402 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2403 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002404 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002405 if (ret)
2406 return ret;
2407
Sara Sharon837c4da2016-01-07 16:50:45 +02002408 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002409 case ADD_STA_SUCCESS:
2410 break;
2411 default:
2412 ret = -EIO;
2413 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2414 start ? "start" : "stopp", status);
2415 break;
2416 }
2417
2418 return ret;
2419}
2420
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002421const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002422 IEEE80211_AC_BE,
2423 IEEE80211_AC_BK,
2424 IEEE80211_AC_BK,
2425 IEEE80211_AC_BE,
2426 IEEE80211_AC_VI,
2427 IEEE80211_AC_VI,
2428 IEEE80211_AC_VO,
2429 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002430 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002431};
2432
Johannes Berg3e56ead2013-02-15 22:23:18 +01002433static const u8 tid_to_ucode_ac[] = {
2434 AC_BE,
2435 AC_BK,
2436 AC_BK,
2437 AC_BE,
2438 AC_VI,
2439 AC_VI,
2440 AC_VO,
2441 AC_VO,
2442};
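/*
 * Both tables above follow the usual 802.11 TID-to-AC mapping: TIDs 0/3 are
 * best effort, 1/2 background, 4/5 video and 6/7 voice; the driver-only
 * TID 8 (management) is pinned to voice in the mac80211 table.
 */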
2443
Johannes Berg8ca151b2013-01-24 14:25:36 +01002444int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2445 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2446{
Johannes Berg5b577a92013-11-14 18:20:04 +01002447 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002448 struct iwl_mvm_tid_data *tid_data;
Liad Kaufmandd321622017-04-05 16:25:11 +03002449 u16 normalized_ssn;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002450 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002451 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002452
2453 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2454 return -EINVAL;
2455
Naftali Goldsteinbd800e42017-08-28 11:51:05 +03002456 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2457 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2458 IWL_ERR(mvm,
2459 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
Johannes Berg8ca151b2013-01-24 14:25:36 +01002460 mvmsta->tid_data[tid].state);
2461 return -ENXIO;
2462 }
2463
2464 lockdep_assert_held(&mvm->mutex);
2465
Liad Kaufmanbd8f3fc2018-01-17 15:25:28 +02002466 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2467 iwl_mvm_has_new_tx_api(mvm)) {
2468 u8 ac = tid_to_mac80211_ac[tid];
2469
2470 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2471 if (ret)
2472 return ret;
2473 }
2474
Arik Nemtsovb2492502014-03-13 12:21:50 +02002475 spin_lock_bh(&mvmsta->lock);
2476
2477 /* possible race condition - we entered D0i3 while starting agg */
2478 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2479 spin_unlock_bh(&mvmsta->lock);
2480 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2481 return -EIO;
2482 }
2483
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002484 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002485
Liad Kaufmancf961e12015-08-13 19:16:08 +03002486 /*
2487 * Note the possible cases:
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002488 * 1. An enabled TXQ - TXQ needs to become agg'ed
2489 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2490 * it as reserved
Liad Kaufmancf961e12015-08-13 19:16:08 +03002491 */
2492 txq_id = mvmsta->tid_data[tid].txq_id;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002493 if (txq_id == IWL_MVM_INVALID_QUEUE) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002494 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
Johannes Bergc8f54702017-06-19 23:50:31 +02002495 IWL_MVM_DQA_MIN_DATA_QUEUE,
2496 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002497 if (txq_id < 0) {
2498 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002499 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2500 goto release_locks;
2501 }
2502
2503 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2504 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002505 } else if (unlikely(mvm->queue_info[txq_id].status ==
2506 IWL_MVM_QUEUE_SHARED)) {
2507 ret = -ENXIO;
2508 IWL_DEBUG_TX_QUEUES(mvm,
2509 "Can't start tid %d agg on shared queue!\n",
2510 tid);
2511 goto release_locks;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002512 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002513
2514 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002515
Liad Kaufmancf961e12015-08-13 19:16:08 +03002516 IWL_DEBUG_TX_QUEUES(mvm,
2517 "AGG for tid %d will be on queue #%d\n",
2518 tid, txq_id);
2519
Johannes Berg8ca151b2013-01-24 14:25:36 +01002520 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002521 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002522 tid_data->txq_id = txq_id;
2523 *ssn = tid_data->ssn;
2524
2525 IWL_DEBUG_TX_QUEUES(mvm,
2526 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2527 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2528 tid_data->next_reclaimed);
2529
Liad Kaufmandd321622017-04-05 16:25:11 +03002530 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002531	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we need
Liad Kaufmandd321622017-04-05 16:25:11 +03002532	 * to align the wrap-around of the ssn so we compare relevant values.
2533 */
2534 normalized_ssn = tid_data->ssn;
2535 if (mvm->trans->cfg->gen2)
2536 normalized_ssn &= 0xff;
2537
2538 if (normalized_ssn == tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002539 tid_data->state = IWL_AGG_STARTING;
2540 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2541 } else {
2542 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2543 }
2544
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002545 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002546 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002547
2548release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002549 spin_unlock(&mvm->queue_info_lock);
2550out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002551 spin_unlock_bh(&mvmsta->lock);
2552
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002553 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002554}
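/*
 * A sketch of the TX agg handshake as implemented here (the mac80211 side
 * is an assumption of the usual flow): iwl_mvm_sta_tx_agg_start() above
 * reserves a queue and, once the SSN catches up with next_reclaimed,
 * signals ieee80211_start_tx_ba_cb_irqsafe(); after the peer's ADDBA
 * response, mac80211 calls back into the driver and
 * iwl_mvm_sta_tx_agg_oper() below enables/reconfigures the queue and
 * sends the LQ command.
 */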
2555
2556int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Luca Coelho514c30692018-06-24 11:59:54 +03002557 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002558 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002559{
Johannes Berg5b577a92013-11-14 18:20:04 +01002560 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002561 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002562 unsigned int wdg_timeout =
2563 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002564 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002565 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002566 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002567 u16 ssn;
2568
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002569 struct iwl_trans_txq_scd_cfg cfg = {
2570 .sta_id = mvmsta->sta_id,
2571 .tid = tid,
2572 .frame_limit = buf_size,
2573 .aggregate = true,
2574 };
2575
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02002576 /*
2577	 * When the FW supports TLC_OFFLOAD, it also implements the Tx aggregation
2578	 * manager, so this function should never be called in that case.
2579 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02002580 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02002581 return -EINVAL;
2582
Eyal Shapiraefed6642014-09-14 15:58:53 +03002583 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2584 != IWL_MAX_TID_COUNT);
2585
Johannes Berg8ca151b2013-01-24 14:25:36 +01002586 spin_lock_bh(&mvmsta->lock);
2587 ssn = tid_data->ssn;
2588 queue = tid_data->txq_id;
2589 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002590 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002591 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002592 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002593 spin_unlock_bh(&mvmsta->lock);
2594
Sara Sharon34e10862017-02-23 13:15:07 +02002595 if (iwl_mvm_has_new_tx_api(mvm)) {
2596 /*
Sara Sharon0ec9257b2017-10-16 09:45:10 +03002597 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2598 * would have failed, so if we are here there is no need to
2599 * allocate a queue.
2600		 * However, if the aggregation size is different from the default
2601		 * size, the scheduler should be reconfigured.
2602		 * We cannot do this with the new TX API, so return unsupported
2603		 * for now, until it is offloaded to the firmware.
2604		 * Note that if the SCD default value changes - this condition
2605		 * should be updated as well.
Sara Sharon34e10862017-02-23 13:15:07 +02002606 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03002607 if (buf_size < IWL_FRAME_LIMIT)
Sara Sharon34e10862017-02-23 13:15:07 +02002608 return -ENOTSUPP;
2609
2610 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2611 if (ret)
2612 return -EIO;
2613 goto out;
2614 }
	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Maybe there is no need to even alloc a queue... */
	if (queue_status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from the current one (i.e. become smaller).
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained.
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}
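
	/*
	 * If no ready queue was found for this TID, allocate one now
	 * and program the SCD with the aggregation parameters and the
	 * watchdog timeout.
	 */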
	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}
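
/*
 * Note: the aggregation start/operational/stop entry points in this file
 * are reached through mac80211's ampdu_action callback and therefore run
 * with mvm->mutex held.
 */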

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through it
	 * yet. This means no traffic has been sent on this TID (aggregated
	 * or not), so we no longer have use for the queue. Since it hasn't
	 * even been allocated through iwl_mvm_enable_txq, we can just mark
	 * it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	spin_unlock_bh(&mvm->queue_info_lock);
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

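	/*
	 * If the session had already reached the ON state, drain and
	 * flush whatever is still queued for this TID and wait for the
	 * queues to empty before telling the firmware aggregation is off.
	 */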
	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
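	/*
	 * For example: deleting the key at offset 3 resets
	 * fw_key_deleted[3] to zero while every other counter grows, so
	 * among the free offsets the one unused longest wins below.
	 */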
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
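	/*
	 * The v1 and current ADD_STA_KEY layouts share their leading
	 * fields, so one union-backed buffer can serve both firmware
	 * APIs; 'size' below selects which layout is actually sent.
	 */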
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
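		/*
		 * mac80211 keeps the IGTK PN big-endian (pn[0] is the
		 * most significant byte); repack it into the
		 * little-endian 48-bit counter the firmware expects.
		 */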
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		/* TKIP in AP mode is not supported */
		if (vif->type == NL80211_IFTYPE_AP) {
			ret = -EINVAL;
			break;
		}
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

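/*
 * Reached via mac80211's update_tkip_key op when the peer's IV32 changes,
 * so the firmware gets the new TKIP phase-1 key without a full rekey.
 */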
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
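	/*
	 * For example: a service period of cnt = 4 frames with only two
	 * frames queued across the released TIDs sends sleep_tx_count = 2
	 * and does not force more_data on.
	 */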
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW has updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

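/*
 * While the AP this client is associated to is switching channels, Tx
 * toward it must stay quiet; mark the AP station as Tx-disabled.
 */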
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we must
	 * align the SSN's wrap-around before comparing the two values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}