/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
        if (iwl_mvm_has_new_rx_api(mvm) ||
            fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                return sizeof(struct iwl_mvm_add_sta_cmd);
        else
                return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

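/* Find a free station index in the firmware's station table */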
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
{
        int sta_id;
        u32 reserved_ids = 0;

        BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

        lockdep_assert_held(&mvm->mutex);

        /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
        if (iftype != NL80211_IFTYPE_STATION)
                reserved_ids = BIT(0);

        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
        for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;

                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
        return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
                .sta_id = mvm_sta->sta_id,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                                 STA_FLG_MIMO_EN_MSK),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;

        if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                add_sta_cmd.station_type = mvm_sta->sta_type;

        if (!update || (flags & STA_MODIFY_QUEUES)) {
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

                if (!iwl_mvm_has_new_tx_api(mvm)) {
                        add_sta_cmd.tfd_queue_msk =
                                cpu_to_le32(mvm_sta->tfd_queue_msk);

                        if (flags & STA_MODIFY_QUEUES)
                                add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
                } else {
                        WARN_ON(flags & STA_MODIFY_QUEUES);
                }
        }

        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_80:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_40:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_20:
                if (sta->ht_cap.ht_supported)
                        add_sta_cmd.station_flags |=
                                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
                break;
        }

        switch (sta->rx_nss) {
        case 1:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case 2:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
                break;
        case 3 ... 8:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
                break;
        }

        switch (sta->smps_mode) {
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
                break;
        case IEEE80211_SMPS_STATIC:
                /* override NSS */
                add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case IEEE80211_SMPS_DYNAMIC:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
                break;
        case IEEE80211_SMPS_OFF:
                /* nothing */
                break;
        }

        if (sta->ht_cap.ht_supported) {
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

                mpdu_dens = sta->ht_cap.ampdu_density;
        }

        if (sta->vht_cap.vht_supported) {
                agg_size = sta->vht_cap.cap &
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                agg_size >>=
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
        } else if (sta->ht_cap.ht_supported) {
                agg_size = sta->ht_cap.ampdu_factor;
        }

        add_sta_cmd.station_flags |=
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
        if (mvm_sta->associated)
                add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

        if (sta->wme) {
                add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BK);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BE);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VI);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VO);
                add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
                add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
        }

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "ADD_STA failed\n");
                break;
        }

        return ret;
}

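/*
 * Timer callback for RX BA sessions: if no frame was received for twice the
 * negotiated timeout, tear the session down through mac80211; otherwise
 * re-arm the timer for the remaining time.
 */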
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
        struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        unsigned long timeout;

        rcu_read_lock();

        ba_data = rcu_dereference(*rcu_ptr);

        if (WARN_ON(!ba_data))
                goto unlock;

        if (!ba_data->timeout)
                goto unlock;

        timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&ba_data->session_timer, timeout);
                goto unlock;
        }

        /* Timer expired */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
                                          sta->addr, ba_data->tid);
unlock:
        rcu_read_unlock();
}

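/* Allocate and enable per-AC hardware queues for a TDLS station (non-DQA) */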
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
                                 struct ieee80211_sta *sta)
{
        unsigned long used_hw_queues;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
        u32 ac;

        lockdep_assert_held(&mvm->mutex);

        used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

        /* Find available queues, and allocate them to the ACs */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                u8 queue = find_first_zero_bit(&used_hw_queues,
                                               mvm->first_agg_queue);

                if (queue >= mvm->first_agg_queue) {
                        IWL_ERR(mvm, "Failed to allocate STA queue\n");
                        return -EBUSY;
                }

                __set_bit(queue, &used_hw_queues);
                mvmsta->hw_queue[ac] = queue;
        }

        /* Found a place for all queues - enable them */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
                                      mvmsta->hw_queue[ac],
                                      iwl_mvm_ac_to_tx_fifo[ac], 0,
                                      wdg_timeout);
                mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
        }

        return 0;
}

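/* Tear down the queues allocated by iwl_mvm_tdls_sta_init() */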
static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
                                    struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned long sta_msk;
        int i;

        lockdep_assert_held(&mvm->mutex);

        /* disable the TDLS STA-specific queues */
        sta_msk = mvmsta->tfd_queue_msk;
        for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
                iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u32 status;
        u8 sta_id;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return -EINVAL;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        mvmsta->tid_disable_agg |= disable_agg_tids;

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_QUEUES;
        if (disable_agg_tids)
                cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        if (remove_queue)
                cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        rcu_read_unlock();

        /* Notify FW of queue removal from the STA queues */
        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);

        return ret;
}

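/* Return a bitmap of the TIDs on this queue that have an open aggregation */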
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long agg_tids = 0;
        s8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return -EINVAL;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        agg_tids |= BIT(tid);
        }
        spin_unlock_bh(&mvmsta->lock);

        return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long disable_agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return 0;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        /* Unmap MAC queues and TIDs from this queue */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
        }

        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
        spin_unlock_bh(&mvmsta->lock);

        rcu_read_unlock();

        return disable_agg_tids;
}

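/*
 * Free a queue that was marked inactive: tear down any aggregations still
 * open on it, disable it and, if it belonged to another station, tell the
 * firmware to remove it from that station's queues.
 */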
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                                       bool same_sta)
{
        struct iwl_mvm_sta *mvmsta;
        u8 txq_curr_ac, sta_id, tid;
        unsigned long disable_agg_tids = 0;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid = mvm->queue_info[queue].txq_tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
        if (WARN_ON(!mvmsta))
                return -EINVAL;

        disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
        /* Disable the queue */
        if (disable_agg_tids)
                iwl_mvm_invalidate_sta_queue(mvm, queue,
                                             disable_agg_tids, false);

        ret = iwl_mvm_disable_txq(mvm, queue,
                                  mvmsta->vif->hw_queue[txq_curr_ac],
                                  tid, 0);
        if (ret) {
                /* Re-mark the inactive queue as inactive */
                spin_lock_bh(&mvm->queue_info_lock);
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm,
                        "Failed to free inactive queue %d (ret=%d)\n",
                        queue, ret);

                return ret;
        }

        /* If TXQ is allocated to another STA, update removal in FW */
        if (!same_sta)
                iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

        return 0;
}

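/* Pick an already-allocated DATA queue that a new TID can share */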
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
        int queue = 0;
        u8 ac_to_queue[IEEE80211_NUM_ACS];
        int i;

        lockdep_assert_held(&mvm->queue_info_lock);
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

        /* See what ACs the existing queues for this STA have */
        for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
                /* Only DATA queues can be shared */
                if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;

                /* Don't try and take queues being reconfigured */
                if (mvm->queue_info[queue].status ==
                    IWL_MVM_QUEUE_RECONFIGURING)
                        continue;

                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }

        /*
         * The queue to share is chosen only from DATA queues as follows (in
         * descending priority):
         * 1. An AC_BE queue
         * 2. Same AC queue
         * 3. Highest AC queue that is lower than new AC
         * 4. Any existing AC (there always is at least 1 DATA queue)
         */

        /* Priority 1: An AC_BE queue */
        if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BE];
        /* Priority 2: Same AC queue */
        else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[ac];
        /* Priority 3a: If new AC is VO and VI exists - use VI */
        else if (ac == IEEE80211_AC_VO &&
                 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 3b: No BE so only AC less than the new one is BK */
        else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BK];
        /* Priority 4a: No BE nor BK - use VI if exists */
        else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 4b: No BE, BK nor VI - use VO if exists */
        else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VO];

        /* Make sure queue found (or not) is legal */
        if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
            !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
            (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
                return -ENOSPC;
        }

        /* Make sure the queue isn't in the middle of being reconfigured */
        if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
                IWL_ERR(mvm,
                        "TXQ %d is in the middle of re-config - try again\n",
                        queue);
                return -EBUSY;
        }

        return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
                               int ac, int ssn, unsigned int wdg_timeout,
                               bool force)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        unsigned long mq;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        /*
         * If the AC is lower than current one - FIFO needs to be redirected to
         * the lowest one of the streams in the queue. Check if this is needed
         * here.
         * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
         * value 3 and VO with value 0, so to check if ac X is lower than ac Y
         * we need to check if the numerical value of X is LARGER than of Y.
         */
        spin_lock_bh(&mvm->queue_info_lock);
        if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
                spin_unlock_bh(&mvm->queue_info_lock);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "No redirection needed on TXQ #%d\n",
                                    queue);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        mq = mvm->hw_queue_to_mac80211[queue];
        shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
        spin_unlock_bh(&mvm->queue_info_lock);

        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);

        /* Stop MAC queues and wait for this queue to empty */
        iwl_mvm_stop_mac_queues(mvm, mq);
        ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
        if (ret) {
                IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                        queue);
                ret = -EIO;
                goto out;
        }

        /* Before redirecting the queue we need to de-activate it */
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                        ret);

        /* Make sure the SCD wrptr is correctly set before reconfiguring */
        iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

        /* Update the TID "owner" of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

        /* Redirect to lower AC */
        iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                             cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
                             ssn);

        /* Update AC marking of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].mac80211_ac = ac;
        spin_unlock_bh(&mvm->queue_info_lock);

        /*
         * Mark queue as shared in transport if shared
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there to shared
         * queues
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
        /* Continue using the MAC queues */
        iwl_mvm_start_mac_queues(mvm, mq);

        return ret;
}

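/* Allocate a TX queue for a station/TID on the new (TVQM) TX path */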
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                        struct ieee80211_sta *sta, u8 ac,
                                        int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating queue for sta %d on tid %d\n",
                            mvmsta->sta_id, tid);
        queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
                                        wdg_timeout);
        if (queue < 0)
                return queue;

        IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        spin_unlock_bh(&mvmsta->lock);

        return 0;
}

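/*
 * Allocate (or share) a DQA TX queue for the given station and TID,
 * configure it and update both the driver state and the firmware.
 */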
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = iwl_mvm_ac_to_tx_fifo[ac],
                .sta_id = mvmsta->sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
        };
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;
        bool using_inactive_queue = false, same_sta = false;
        unsigned long disable_agg_tids = 0;
        enum iwl_mvm_agg_state queue_state;
        bool shared_queue = false, inc_ssn;
        int ssn;
        unsigned long tfd_queue_mask;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm))
                return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

        spin_lock_bh(&mvmsta->lock);
        tfd_queue_mask = mvmsta->tfd_queue_msk;
        spin_unlock_bh(&mvmsta->lock);

        spin_lock_bh(&mvm->queue_info_lock);

        /*
         * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
         * exists
         */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control)) {
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                                            queue);

                /* If no such queue is found, we'll use a DATA queue instead */
        }

        if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
            (mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_RESERVED ||
             mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_INACTIVE)) {
                queue = mvmsta->reserved_queue;
                mvm->queue_info[queue].reserved = true;
                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
        }

        if (queue < 0)
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);

        /*
         * Check if this queue is already allocated but inactive.
         * In such a case, we'll need to first free this queue before enabling
         * it again, so we'll mark it as reserved to make sure no new traffic
         * arrives on it
         */
        if (queue > 0 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
                using_inactive_queue = true;
                same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
                                    queue, mvmsta->sta_id, tid);
        }

        /* No free queue - we'll have to share */
        if (queue <= 0) {
                queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
                if (queue > 0) {
                        shared_queue = true;
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
                }
        }

        /*
         * Mark TXQ as ready, even though it hasn't been fully configured yet,
         * to make sure no one else takes it.
         * This will allow avoiding re-acquiring the lock at the end of the
         * configuration. On error we'll mark it back as free.
         */
        if ((queue > 0) && !shared_queue)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

        spin_unlock_bh(&mvm->queue_info_lock);

        /* This shouldn't happen - out of queues */
        if (WARN_ON(queue <= 0)) {
                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                        tid, cfg.sta_id);
                return queue;
        }

        /*
         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
         * but for configuring the SCD to send A-MPDUs we need to mark the queue
         * as aggregatable.
         * Mark all DATA queues as allowing to be aggregated at some point
         */
        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

        /*
         * If this queue was previously inactive (idle) - we need to free it
         * first
         */
        if (using_inactive_queue) {
                ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
                if (ret)
                        return ret;
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating %squeue #%d to sta %d on tid %d\n",
                            shared_queue ? "shared " : "", queue,
                            mvmsta->sta_id, tid);

        if (shared_queue) {
                /* Disable any open aggs on this queue */
                disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

                if (disable_agg_tids) {
                        IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
                                            queue);
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                }
        }

        ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
        inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
                                     ssn, &cfg, wdg_timeout);
        if (inc_ssn) {
                ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
                le16_add_cpu(&hdr->seq_ctrl, 0x10);
        }

        /*
         * Mark queue as shared in transport if shared
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there to shared
         * queues
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

        spin_lock_bh(&mvmsta->lock);
        /*
         * This looks racy, but it is not. We have only one packet for
         * this ra/tid in our Tx path since we stop the Qdisc when we
         * need to allocate a new TFD queue.
         */
        if (inc_ssn)
                mvmsta->tid_data[tid].seq_number += 0x10;
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        mvmsta->tfd_queue_msk |= BIT(queue);
        queue_state = mvmsta->tid_data[tid].state;

        if (mvmsta->reserved_queue == queue)
                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
        spin_unlock_bh(&mvmsta->lock);

        if (!shared_queue) {
                ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
                if (ret)
                        goto out_err;

                /* If we need to re-enable aggregations... */
                if (queue_state == IWL_AGG_ON) {
                        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                        if (ret)
                                goto out_err;
                }
        } else {
                /* Redirect queue, if needed */
                ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
                                                 wdg_timeout, false);
                if (ret)
                        goto out_err;
        }

        return 0;

out_err:
        iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

        return ret;
}

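/* Move firmware ownership of a shared queue to one of its remaining TIDs */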
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
        int tid;
        unsigned long tid_bitmap;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        spin_lock_bh(&mvm->queue_info_lock);
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                return;

        /* Find any TID for queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        cmd.tid = tid;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                        queue, ret);
                return;
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);
        IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                            queue, tid);
}

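/*
 * A shared queue has a single TID left on it - redirect it back to that
 * TID's AC and, if aggregation had been open on it, re-enable it.
 */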
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        s8 sta_id;
        int tid = -1;
        unsigned long tid_bitmap;
        unsigned int wdg_timeout;
        int ssn;
        int ret = true;

        /* queue sharing is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* Find TID for queue, and make sure it is the only one on the queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        if (tid_bitmap != BIT(tid)) {
                IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                        queue, tid_bitmap);
                return;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                            tid);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

        ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
                                         tid_to_mac80211_ac[tid], ssn,
                                         wdg_timeout, true);
        if (ret) {
                IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
                return;
        }

        /* If aggs should be turned back on - do it */
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
                struct iwl_mvm_add_sta_cmd cmd = {0};

                mvmsta->tid_disable_agg &= ~BIT(tid);

                cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
                cmd.sta_id = mvmsta->sta_id;
                cmd.add_modify = STA_MODE_MODIFY;
                cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
                cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
                cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                           iwl_mvm_add_sta_cmd_size(mvm), &cmd);
                if (!ret) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "TXQ #%d is now aggregated again\n",
                                            queue);

                        /* Mark queue internally as aggregating again */
                        iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
                }
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
        spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
        if (tid == IWL_MAX_TID_COUNT)
                return IEEE80211_AC_VO; /* MGMT */

        return tid_to_mac80211_ac[tid];
}

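/*
 * Transmit the frames that were deferred for this station/TID while no queue
 * was allocated, allocating a queue first if needed; if no queue can be
 * allocated the deferred frames are dropped.
 */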
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta, int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head deferred_tx;
        u8 mac_queue;
        bool no_queue = false; /* Marks if there is a problem with the queue */
        u8 ac;

        lockdep_assert_held(&mvm->mutex);

        skb = skb_peek(&tid_data->deferred_tx_frames);
        if (!skb)
                return;
        hdr = (void *)skb->data;

        ac = iwl_mvm_tid_to_ac_queue(tid);
        mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

        if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
            iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
                IWL_ERR(mvm,
                        "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
                        mvmsta->sta_id, tid);

                /*
                 * Mark queue as problematic so later the deferred traffic is
                 * freed, as we can do nothing with it
                 */
                no_queue = true;
        }

        __skb_queue_head_init(&deferred_tx);

        /* Disable bottom-halves when entering TX path */
        local_bh_disable();
        spin_lock(&mvmsta->lock);
        skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
        mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
        spin_unlock(&mvmsta->lock);

        while ((skb = __skb_dequeue(&deferred_tx)))
                if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
                        ieee80211_free_txskb(mvm->hw, skb);
        local_bh_enable();

        /* Wake queue */
        iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

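/*
 * Worker for DQA mode: reconfigure queues that need it (unsharing or a change
 * of TID ownership) and allocate queues for stations with deferred traffic.
 */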
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
                                           add_stream_wk);
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long deferred_tid_traffic;
        int queue, sta_id, tid;

        /* Check inactivity of queues */
        iwl_mvm_inactivity_check(mvm);

        mutex_lock(&mvm->mutex);

        /* No queue reconfiguration in TVQM mode */
        if (iwl_mvm_has_new_tx_api(mvm))
                goto alloc_queues;

        /* Reconfigure queues requiring reconfiguration */
        for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
                bool reconfig;
                bool change_owner;

                spin_lock_bh(&mvm->queue_info_lock);
                reconfig = (mvm->queue_info[queue].status ==
                            IWL_MVM_QUEUE_RECONFIGURING);

                /*
                 * We need to take into account a situation in which a TXQ was
                 * allocated to TID x, and then turned shared by adding TIDs y
                 * and z. If TID x becomes inactive and is removed from the TXQ,
                 * ownership must be given to one of the remaining TIDs.
                 * This is mainly because if TID x continues - a new queue can't
                 * be allocated for it as long as it is an owner of another TXQ.
                 */
                change_owner = !(mvm->queue_info[queue].tid_bitmap &
                                 BIT(mvm->queue_info[queue].txq_tid)) &&
                               (mvm->queue_info[queue].status ==
                                IWL_MVM_QUEUE_SHARED);
                spin_unlock_bh(&mvm->queue_info_lock);

                if (reconfig)
                        iwl_mvm_unshare_queue(mvm, queue);
                else if (change_owner)
                        iwl_mvm_change_queue_owner(mvm, queue);
        }

alloc_queues:
        /* Go over all stations with deferred traffic */
        for_each_set_bit(sta_id, mvm->sta_deferred_frames,
                         IWL_MVM_STATION_COUNT) {
                clear_bit(sta_id, mvm->sta_deferred_frames);
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

                for_each_set_bit(tid, &deferred_tid_traffic,
                                 IWL_MAX_TID_COUNT + 1)
                        iwl_mvm_tx_deferred_stream(mvm, sta, tid);
        }

        mutex_unlock(&mvm->mutex);
}

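/* Reserve a DATA queue for a new station before it is added to the firmware */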
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta,
                                      enum nl80211_iftype vif_type)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        int queue;
        bool using_inactive_queue = false, same_sta = false;

        /* queue reserving is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return 0;

        /*
         * Check for inactive queues, so we don't reach a situation where we
         * can't add a STA due to a shortage in queues that doesn't really exist
         */
        iwl_mvm_inactivity_check(mvm);

        spin_lock_bh(&mvm->queue_info_lock);

        /* Make sure we have free resources for this STA */
        if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
            !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
            (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
             IWL_MVM_QUEUE_FREE))
                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
        else
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
        if (queue < 0) {
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm, "No available queues for new station\n");
                return -ENOSPC;
        } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                /*
                 * If this queue is already allocated but inactive we'll need to
                 * first free this queue before enabling it again, we'll mark
                 * it as reserved to make sure no new traffic arrives on it
                 */
                using_inactive_queue = true;
                same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
        }
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta->reserved_queue = queue;

        if (using_inactive_queue)
                iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

        IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
                            queue, mvmsta->sta_id);

        return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                                 struct iwl_mvm_sta *mvm_sta)
{
        unsigned int wdg_timeout =
                        iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
        int i;
        struct iwl_trans_txq_scd_cfg cfg = {
                .sta_id = mvm_sta->sta_id,
                .frame_limit = IWL_FRAME_LIMIT,
        };

        /* Make sure reserved queue is still marked as such (if allocated) */
        if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
                mvm->queue_info[mvm_sta->reserved_queue].status =
                        IWL_MVM_QUEUE_RESERVED;

        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
                int txq_id = tid_data->txq_id;
                int ac;
                u8 mac_queue;

                if (txq_id == IWL_MVM_INVALID_QUEUE)
                        continue;

                skb_queue_head_init(&tid_data->deferred_tx_frames);

                ac = tid_to_mac80211_ac[i];
                mac_queue = mvm_sta->vif->hw_queue[ac];

                if (iwl_mvm_has_new_tx_api(mvm)) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Re-mapping sta %d tid %d\n",
                                            mvm_sta->sta_id, i);
                        txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
                                                         mvm_sta->sta_id,
                                                         i, wdg_timeout);
                        tid_data->txq_id = txq_id;
                } else {
                        u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

                        cfg.tid = i;
                        cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
                        cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                                         txq_id ==
                                         IWL_MVM_DQA_BSS_CLIENT_QUEUE);

                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Re-mapping sta %d tid %d to queue %d\n",
                                            mvm_sta->sta_id, i, txq_id);

                        iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
                                           wdg_timeout);
                        mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
                }
        }

        atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

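/*
 * Add a station: allocate a station index, set up its per-TID state and
 * queues, and send the ADD_STA command to the firmware.
 */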
Johannes Berg8ca151b2013-01-24 14:25:36 +01001324int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1325 struct ieee80211_vif *vif,
1326 struct ieee80211_sta *sta)
1327{
1328 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001329 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharona571f5f2015-12-07 12:50:58 +02001330 struct iwl_mvm_rxq_dup_data *dup_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001331 int i, ret, sta_id;
1332
1333 lockdep_assert_held(&mvm->mutex);
1334
1335 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
Eliad Pellerb92e6612014-01-23 17:58:23 +02001336 sta_id = iwl_mvm_find_free_sta_id(mvm,
1337 ieee80211_vif_type_p2p(vif));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001338 else
1339 sta_id = mvm_sta->sta_id;
1340
Sara Sharon0ae98812017-01-04 14:53:58 +02001341 if (sta_id == IWL_MVM_INVALID_STA)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001342 return -ENOSPC;
1343
1344 spin_lock_init(&mvm_sta->lock);
1345
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001346 /* In DQA mode, if this is a HW restart, re-alloc existing queues */
1347 if (iwl_mvm_is_dqa_supported(mvm) &&
1348 test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1349 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
1350 goto update_fw;
1351 }
1352
Johannes Berg8ca151b2013-01-24 14:25:36 +01001353 mvm_sta->sta_id = sta_id;
1354 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1355 mvmvif->color);
1356 mvm_sta->vif = vif;
1357 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001358 mvm_sta->tx_protection = 0;
1359 mvm_sta->tt_tx_protection = false;
Sara Sharonced19f22017-02-06 19:09:32 +02001360 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001361
1362 /* HW restart, don't assume the memory has been zeroed */
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001363 atomic_set(&mvm->pending_frames[sta_id], 0);
Liad Kaufman69191af2015-09-01 18:50:22 +03001364 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001365 mvm_sta->tfd_queue_msk = 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001366
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001367 /*
1368 * Allocate new queues for a TDLS station, unless we're in DQA mode,
1369 * and then they'll be allocated dynamically
1370 */
1371 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001372 ret = iwl_mvm_tdls_sta_init(mvm, sta);
1373 if (ret)
1374 return ret;
Liad Kaufman24afba72015-07-28 18:56:08 +03001375 } else if (!iwl_mvm_is_dqa_supported(mvm)) {
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001376 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1377 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
1378 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
1379 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001380
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001381 /* for HW restart - reset everything but the sequence number */
Liad Kaufman24afba72015-07-28 18:56:08 +03001382 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001383 u16 seq = mvm_sta->tid_data[i].seq_number;
1384 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1385 mvm_sta->tid_data[i].seq_number = seq;
Liad Kaufman24afba72015-07-28 18:56:08 +03001386
1387 if (!iwl_mvm_is_dqa_supported(mvm))
1388 continue;
1389
1390 /*
1391 * Mark all queues for this STA as unallocated and defer TX
1392 * frames until the queue is allocated
1393 */
Sara Sharon6862fce2017-02-22 19:34:17 +02001394 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001395 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001396 }
Liad Kaufman24afba72015-07-28 18:56:08 +03001397 mvm_sta->deferred_traffic_tid_map = 0;
Eyal Shapiraefed6642014-09-14 15:58:53 +03001398 mvm_sta->agg_tids = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001399
Sara Sharona571f5f2015-12-07 12:50:58 +02001400 if (iwl_mvm_has_new_rx_api(mvm) &&
1401 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1402 dup_data = kcalloc(mvm->trans->num_rx_queues,
1403 sizeof(*dup_data),
1404 GFP_KERNEL);
1405 if (!dup_data)
1406 return -ENOMEM;
1407 mvm_sta->dup_data = dup_data;
1408 }
1409
Sara Sharon396952e2017-02-22 19:40:55 +02001410 if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufmand5216a22015-08-09 15:50:51 +03001411 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1412 ieee80211_vif_type_p2p(vif));
Liad Kaufman24afba72015-07-28 18:56:08 +03001413 if (ret)
1414 goto err;
1415 }
1416
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001417update_fw:
Liad Kaufman24afba72015-07-28 18:56:08 +03001418 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001419 if (ret)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001420 goto err;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001421
Johannes Berg9e848012014-08-04 14:33:42 +02001422 if (vif->type == NL80211_IFTYPE_STATION) {
1423 if (!sta->tdls) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001424 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001425 mvmvif->ap_sta_id = sta_id;
1426 } else {
Sara Sharon0ae98812017-01-04 14:53:58 +02001427 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001428 }
1429 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001430
1431 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1432
1433 return 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001434
1435err:
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001436 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
1437 iwl_mvm_tdls_sta_deinit(mvm, sta);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001438 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001439}
1440
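/*
 * Toggle the STA_FLG_DRAIN_FLOW flag for a station through ADD_STA so
 * that the firmware starts (or stops) draining the frames still pending
 * for it.
 */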
1441int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1442 bool drain)
1443{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001444 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001445 int ret;
1446 u32 status;
1447
1448 lockdep_assert_held(&mvm->mutex);
1449
1450 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1451 cmd.sta_id = mvmsta->sta_id;
1452 cmd.add_modify = STA_MODE_MODIFY;
1453 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1454 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1455
1456 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001457 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1458 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001459 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001460 if (ret)
1461 return ret;
1462
Sara Sharon837c4da2016-01-07 16:50:45 +02001463 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001464 case ADD_STA_SUCCESS:
 1465		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1466 mvmsta->sta_id);
1467 break;
1468 default:
1469 ret = -EIO;
1470 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1471 mvmsta->sta_id);
1472 break;
1473 }
1474
1475 return ret;
1476}
1477
1478/*
1479 * Remove a station from the FW table. Before sending the command to remove
1480 * the station validate that the station is indeed known to the driver (sanity
1481 * only).
1482 */
1483static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1484{
1485 struct ieee80211_sta *sta;
1486 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1487 .sta_id = sta_id,
1488 };
1489 int ret;
1490
1491 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1492 lockdep_is_held(&mvm->mutex));
1493
1494 /* Note: internal stations are marked as error values */
1495 if (!sta) {
1496 IWL_ERR(mvm, "Invalid station id\n");
1497 return -EINVAL;
1498 }
1499
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001500 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001501 sizeof(rm_sta_cmd), &rm_sta_cmd);
1502 if (ret) {
1503 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1504 return ret;
1505 }
1506
1507 return 0;
1508}
1509
1510void iwl_mvm_sta_drained_wk(struct work_struct *wk)
1511{
1512 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
1513 u8 sta_id;
1514
1515 /*
1516 * The mutex is needed because of the SYNC cmd, but not only: if the
1517 * work would run concurrently with iwl_mvm_rm_sta, it would run before
1518 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
1519 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
1520 * that later.
1521 */
1522 mutex_lock(&mvm->mutex);
1523
1524 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
1525 int ret;
1526 struct ieee80211_sta *sta =
1527 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1528 lockdep_is_held(&mvm->mutex));
1529
Johannes Berg1ddbbb02013-12-04 22:39:17 +01001530 /*
1531 * This station is in use or RCU-removed; the latter happens in
1532 * managed mode, where mac80211 removes the station before we
1533 * can remove it from firmware (we can only do that after the
1534 * MAC is marked unassociated), and possibly while the deauth
1535 * frame to disconnect from the AP is still queued. Then, the
1536 * station pointer is -ENOENT when the last skb is reclaimed.
1537 */
1538 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001539 continue;
1540
1541 if (PTR_ERR(sta) == -EINVAL) {
1542 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
1543 sta_id);
1544 continue;
1545 }
1546
1547 if (!sta) {
1548 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
1549 sta_id);
1550 continue;
1551 }
1552
1553 WARN_ON(PTR_ERR(sta) != -EBUSY);
 1554		/* This station was removed and we waited until it got drained;
1555 * we can now proceed and remove it.
1556 */
1557 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1558 if (ret) {
1559 IWL_ERR(mvm,
1560 "Couldn't remove sta %d after it was drained\n",
1561 sta_id);
1562 continue;
1563 }
Monam Agarwalc531c772014-03-24 00:05:56 +05301564 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001565 clear_bit(sta_id, mvm->sta_drained);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001566
1567 if (mvm->tfd_drained[sta_id]) {
1568 unsigned long i, msk = mvm->tfd_drained[sta_id];
1569
Emmanuel Grumbacha4ca3ed2015-01-20 17:07:10 +02001570 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
Arik Nemtsov06ecdba2015-10-12 14:47:11 +03001571 iwl_mvm_disable_txq(mvm, i, i,
1572 IWL_MAX_TID_COUNT, 0);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001573
1574 mvm->tfd_drained[sta_id] = 0;
1575 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
1576 sta_id, msk);
1577 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001578 }
1579
1580 mutex_unlock(&mvm->mutex);
1581}
1582
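/*
 * Disable every TX queue still allocated to this station (DQA mode) and
 * mark the per-TID queue IDs as invalid again.
 */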
Liad Kaufman24afba72015-07-28 18:56:08 +03001583static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1584 struct ieee80211_vif *vif,
1585 struct iwl_mvm_sta *mvm_sta)
1586{
1587 int ac;
1588 int i;
1589
1590 lockdep_assert_held(&mvm->mutex);
1591
1592 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
Sara Sharon6862fce2017-02-22 19:34:17 +02001593 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman24afba72015-07-28 18:56:08 +03001594 continue;
1595
1596 ac = iwl_mvm_tid_to_ac_queue(i);
1597 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1598 vif->hw_queue[ac], i, 0);
Sara Sharon6862fce2017-02-22 19:34:17 +02001599 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001600 }
1601}
1602
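/*
 * Wait until all TX queues used by this station have drained; TIDs
 * without an allocated queue are skipped and the wait stops at the
 * first error.
 */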
Sara Sharond6d517b2017-03-06 10:16:11 +02001603int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1604 struct iwl_mvm_sta *mvm_sta)
1605{
 1606	int i, ret = 0; /* stays 0 if no TID has a queue to wait on */
1607
1608 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1609 u16 txq_id;
1610
1611 spin_lock_bh(&mvm_sta->lock);
1612 txq_id = mvm_sta->tid_data[i].txq_id;
1613 spin_unlock_bh(&mvm_sta->lock);
1614
1615 if (txq_id == IWL_MVM_INVALID_QUEUE)
1616 continue;
1617
1618 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1619 if (ret)
1620 break;
1621 }
1622
1623 return ret;
1624}
1625
Johannes Berg8ca151b2013-01-24 14:25:36 +01001626int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1627 struct ieee80211_vif *vif,
1628 struct ieee80211_sta *sta)
1629{
1630 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001631 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001632 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001633 int ret;
1634
1635 lockdep_assert_held(&mvm->mutex);
1636
Sara Sharona571f5f2015-12-07 12:50:58 +02001637 if (iwl_mvm_has_new_rx_api(mvm))
1638 kfree(mvm_sta->dup_data);
1639
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001640 if ((vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon94c3e612016-12-07 15:04:37 +02001641 mvmvif->ap_sta_id == sta_id) ||
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001642	    iwl_mvm_is_dqa_supported(mvm)) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001643 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1644 if (ret)
1645 return ret;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001646 /* flush its queues here since we are freeing mvm_sta */
Sara Sharond49394a2017-03-05 13:01:08 +02001647 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001648 if (ret)
1649 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001650 if (iwl_mvm_has_new_tx_api(mvm)) {
1651 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1652 } else {
1653 u32 q_mask = mvm_sta->tfd_queue_msk;
1654
1655 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1656 q_mask);
1657 }
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001658 if (ret)
1659 return ret;
1660 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001661
Liad Kaufman24afba72015-07-28 18:56:08 +03001662 /* If DQA is supported - the queues can be disabled now */
Sara Sharon94c3e612016-12-07 15:04:37 +02001663 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman56214742016-09-22 15:14:08 +03001664 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001665 /*
 1666			 * If pending_frames is set at this point, it must be a
 1667			 * driver-internal logic error, since the queues are empty
 1668			 * and were removed successfully.
 1669			 * Warn on it, but set it to 0 anyway to avoid the station
 1670			 * not being removed later in the function.
1671 */
1672 WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
1673 }
Liad Kaufman56214742016-09-22 15:14:08 +03001674
1675 /* If there is a TXQ still marked as reserved - free it */
1676 if (iwl_mvm_is_dqa_supported(mvm) &&
1677 mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001678 u8 reserved_txq = mvm_sta->reserved_queue;
1679 enum iwl_mvm_queue_status *status;
1680
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001681 /*
1682 * If no traffic has gone through the reserved TXQ - it
1683 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1684 * should be manually marked as free again
1685 */
1686 spin_lock_bh(&mvm->queue_info_lock);
1687 status = &mvm->queue_info[reserved_txq].status;
1688 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1689 (*status != IWL_MVM_QUEUE_FREE),
1690 "sta_id %d reserved txq %d status %d",
Sara Sharon94c3e612016-12-07 15:04:37 +02001691 sta_id, reserved_txq, *status)) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001692 spin_unlock_bh(&mvm->queue_info_lock);
1693 return -EINVAL;
1694 }
1695
1696 *status = IWL_MVM_QUEUE_FREE;
1697 spin_unlock_bh(&mvm->queue_info_lock);
1698 }
1699
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001700 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon94c3e612016-12-07 15:04:37 +02001701 mvmvif->ap_sta_id == sta_id) {
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001702 /* if associated - we can't remove the AP STA now */
1703 if (vif->bss_conf.assoc)
1704 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001705
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001706 /* unassoc - go ahead - remove the AP STA now */
Sara Sharon0ae98812017-01-04 14:53:58 +02001707 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
Eliad Peller37577fe2013-12-05 17:19:39 +02001708
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001709 /* clear d0i3_ap_sta_id if no longer relevant */
Sara Sharon94c3e612016-12-07 15:04:37 +02001710 if (mvm->d0i3_ap_sta_id == sta_id)
Sara Sharon0ae98812017-01-04 14:53:58 +02001711 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001712 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001713 }
1714
1715 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001716 * This shouldn't happen - the TDLS channel switch should be canceled
1717 * before the STA is removed.
1718 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001719 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001720 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001721 cancel_delayed_work(&mvm->tdls_cs.dwork);
1722 }
1723
1724 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001725 * Make sure that the tx response code sees the station as -EBUSY and
1726 * calls the drain worker.
1727 */
1728 spin_lock_bh(&mvm_sta->lock);
Sara Sharon94c3e612016-12-07 15:04:37 +02001729
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001730 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001731 * There are frames pending on the AC queues for this station.
1732 * We need to wait until all the frames are drained...
1733 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001734 if (atomic_read(&mvm->pending_frames[sta_id])) {
1735 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
Johannes Berg8ca151b2013-01-24 14:25:36 +01001736 ERR_PTR(-EBUSY));
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001737 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001738
1739 /* disable TDLS sta queues on drain complete */
1740 if (sta->tdls) {
Sara Sharon94c3e612016-12-07 15:04:37 +02001741 mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
1742 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001743 }
1744
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001745 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001746 } else {
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001747 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001748
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001749 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001750 iwl_mvm_tdls_sta_deinit(mvm, sta);
1751
Johannes Berg8ca151b2013-01-24 14:25:36 +01001752 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
Monam Agarwalc531c772014-03-24 00:05:56 +05301753 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001754 }
1755
1756 return ret;
1757}
1758
1759int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1760 struct ieee80211_vif *vif,
1761 u8 sta_id)
1762{
1763 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1764
1765 lockdep_assert_held(&mvm->mutex);
1766
Monam Agarwalc531c772014-03-24 00:05:56 +05301767 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001768 return ret;
1769}
1770
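/*
 * Allocate a driver-internal station (e.g. the aux or broadcast
 * station): pick a free station ID unless we're recovering from a HW
 * restart, record the queue mask and type, and mark the entry in
 * fw_id_to_mac_id with an error pointer.
 */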
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001771int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1772 struct iwl_mvm_int_sta *sta,
Sara Sharonced19f22017-02-06 19:09:32 +02001773 u32 qmask, enum nl80211_iftype iftype,
1774 enum iwl_sta_type type)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001775{
1776 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02001777 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Sara Sharon0ae98812017-01-04 14:53:58 +02001778 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001779 return -ENOSPC;
1780 }
1781
1782 sta->tfd_queue_msk = qmask;
Sara Sharonced19f22017-02-06 19:09:32 +02001783 sta->type = type;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001784
1785 /* put a non-NULL value so iterating over the stations won't stop */
1786 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1787 return 0;
1788}
1789
Sara Sharon26d6c162017-01-03 12:00:19 +02001790void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001791{
Monam Agarwalc531c772014-03-24 00:05:56 +05301792 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001793 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
Sara Sharon0ae98812017-01-04 14:53:58 +02001794 sta->sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001795}
1796
1797static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1798 struct iwl_mvm_int_sta *sta,
1799 const u8 *addr,
1800 u16 mac_id, u16 color)
1801{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001802 struct iwl_mvm_add_sta_cmd cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001803 int ret;
1804 u32 status;
1805
1806 lockdep_assert_held(&mvm->mutex);
1807
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001808 memset(&cmd, 0, sizeof(cmd));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001809 cmd.sta_id = sta->sta_id;
1810 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1811 color));
Sara Sharonced19f22017-02-06 19:09:32 +02001812 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1813 cmd.station_type = sta->type;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001814
Sara Sharonbb497012016-09-29 14:52:40 +03001815 if (!iwl_mvm_has_new_tx_api(mvm))
1816 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
Liad Kaufmancf0cda12015-09-24 10:44:12 +02001817 cmd.tid_disable_tx = cpu_to_le16(0xffff);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001818
1819 if (addr)
1820 memcpy(cmd.addr, addr, ETH_ALEN);
1821
Sara Sharon854c5702016-01-26 13:17:47 +02001822 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1823 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001824 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001825 if (ret)
1826 return ret;
1827
Sara Sharon837c4da2016-01-07 16:50:45 +02001828 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001829 case ADD_STA_SUCCESS:
1830 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1831 return 0;
1832 default:
1833 ret = -EIO;
1834 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1835 status);
1836 break;
1837 }
1838 return ret;
1839}
1840
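/*
 * Map the aux queue to its FIFO: with the new TX API the queue is
 * allocated dynamically, in DQA mode a fixed queue is enabled with an
 * SCD configuration, and otherwise the legacy per-AC enable path is
 * used.
 */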
Sara Sharonc5a719e2016-11-15 10:20:48 +02001841static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001842{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001843 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1844 mvm->cfg->base_params->wd_timeout :
1845 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001846
Sara Sharon310181e2017-01-17 14:27:48 +02001847 if (iwl_mvm_has_new_tx_api(mvm)) {
1848 int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
1849 mvm->aux_sta.sta_id,
1850 IWL_MAX_TID_COUNT,
1851 wdg_timeout);
1852 mvm->aux_queue = queue;
1853 } else if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman28d07932015-09-01 16:36:25 +03001854 struct iwl_trans_txq_scd_cfg cfg = {
1855 .fifo = IWL_MVM_TX_FIFO_MCAST,
1856 .sta_id = mvm->aux_sta.sta_id,
1857 .tid = IWL_MAX_TID_COUNT,
1858 .aggregate = false,
1859 .frame_limit = IWL_FRAME_LIMIT,
1860 };
1861
1862 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1863 wdg_timeout);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001864 } else {
1865 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1866 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
Liad Kaufman28d07932015-09-01 16:36:25 +03001867 }
Sara Sharonc5a719e2016-11-15 10:20:48 +02001868}
1869
1870int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1871{
1872 int ret;
1873
1874 lockdep_assert_held(&mvm->mutex);
1875
1876 /* Allocate aux station and assign to it the aux queue */
1877 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Sara Sharonced19f22017-02-06 19:09:32 +02001878 NL80211_IFTYPE_UNSPECIFIED,
1879 IWL_STA_AUX_ACTIVITY);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001880 if (ret)
1881 return ret;
1882
1883 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1884 if (!iwl_mvm_has_new_tx_api(mvm))
1885 iwl_mvm_enable_aux_queue(mvm);
Liad Kaufman28d07932015-09-01 16:36:25 +03001886
Johannes Berg8ca151b2013-01-24 14:25:36 +01001887 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1888 MAC_INDEX_AUX, 0);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001889 if (ret) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001890 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001891 return ret;
1892 }
1893
1894 /*
 1895	 * For a000 firmware and on, we cannot add a queue to a station unknown
 1896	 * to the firmware, so enable the queue here - after the station was added
1897 */
1898 if (iwl_mvm_has_new_tx_api(mvm))
1899 iwl_mvm_enable_aux_queue(mvm);
1900
1901 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001902}
1903
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001904int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1905{
1906 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1907
1908 lockdep_assert_held(&mvm->mutex);
1909 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1910 mvmvif->id, 0);
1911}
1912
1913int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1914{
1915 int ret;
1916
1917 lockdep_assert_held(&mvm->mutex);
1918
1919 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1920 if (ret)
1921 IWL_WARN(mvm, "Failed sending remove station\n");
1922
1923 return ret;
1924}
1925
1926void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1927{
1928 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1929}
1930
Johannes Berg712b24a2014-08-04 14:14:14 +02001931void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1932{
1933 lockdep_assert_held(&mvm->mutex);
1934
1935 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1936}
1937
Johannes Berg8ca151b2013-01-24 14:25:36 +01001938/*
1939 * Send the add station command for the vif's broadcast station.
1940 * Assumes that the station was already allocated.
1941 *
1942 * @mvm: the mvm component
1943 * @vif: the interface to which the broadcast station is added
1945 */
Johannes Berg013290a2014-08-04 13:38:48 +02001946int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001947{
1948 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001949 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001950 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001951 const u8 *baddr = _baddr;
Johannes Berg7daa7622017-02-24 12:02:22 +01001952 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001953 int ret;
Sara Sharonc5a719e2016-11-15 10:20:48 +02001954 unsigned int wdg_timeout =
1955 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1956 struct iwl_trans_txq_scd_cfg cfg = {
1957 .fifo = IWL_MVM_TX_FIFO_VO,
1958 .sta_id = mvmvif->bcast_sta.sta_id,
1959 .tid = IWL_MAX_TID_COUNT,
1960 .aggregate = false,
1961 .frame_limit = IWL_FRAME_LIMIT,
1962 };
Johannes Berg8ca151b2013-01-24 14:25:36 +01001963
1964 lockdep_assert_held(&mvm->mutex);
1965
Sara Sharon310181e2017-01-17 14:27:48 +02001966 if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufman4d339982017-03-21 17:13:16 +02001967 if (vif->type == NL80211_IFTYPE_AP ||
1968 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon49f71712017-01-09 12:07:16 +02001969 queue = mvm->probe_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001970 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
Sara Sharon49f71712017-01-09 12:07:16 +02001971 queue = mvm->p2p_dev_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001972 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
Liad Kaufmande24f632015-08-04 15:19:18 +03001973 return -EINVAL;
1974
Liad Kaufmandf88c082016-11-24 15:31:00 +02001975 bsta->tfd_queue_msk |= BIT(queue);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001976
Sara Sharon310181e2017-01-17 14:27:48 +02001977 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
1978 &cfg, wdg_timeout);
Liad Kaufmande24f632015-08-04 15:19:18 +03001979 }
1980
Johannes Berg5023d962013-07-31 14:07:43 +02001981 if (vif->type == NL80211_IFTYPE_ADHOC)
1982 baddr = vif->bss_conf.bssid;
1983
Sara Sharon0ae98812017-01-04 14:53:58 +02001984 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001985 return -ENOSPC;
1986
Liad Kaufmandf88c082016-11-24 15:31:00 +02001987 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1988 mvmvif->id, mvmvif->color);
1989 if (ret)
1990 return ret;
1991
1992 /*
Sara Sharonc5a719e2016-11-15 10:20:48 +02001993	 * For a000 firmware and on, we cannot add a queue to a station unknown
 1994	 * to the firmware, so enable the queue here - after the station was added
Liad Kaufmandf88c082016-11-24 15:31:00 +02001995 */
Sara Sharon310181e2017-01-17 14:27:48 +02001996 if (iwl_mvm_has_new_tx_api(mvm)) {
Johannes Berg7daa7622017-02-24 12:02:22 +01001997 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
1998 bsta->sta_id,
1999 IWL_MAX_TID_COUNT,
2000 wdg_timeout);
2001
Sara Sharon310181e2017-01-17 14:27:48 +02002002 if (vif->type == NL80211_IFTYPE_AP)
2003 mvm->probe_queue = queue;
2004 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2005 mvm->p2p_dev_queue = queue;
Sara Sharon310181e2017-01-17 14:27:48 +02002006 }
Liad Kaufmandf88c082016-11-24 15:31:00 +02002007
2008 return 0;
2009}
2010
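/*
 * Flush the broadcast station's frames and disable the queue it was
 * using: the probe queue for AP/IBSS, or the P2P device queue for a
 * P2P device interface.
 */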
2011static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2012 struct ieee80211_vif *vif)
2013{
2014 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002015 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002016
2017 lockdep_assert_held(&mvm->mutex);
2018
Sara Sharond49394a2017-03-05 13:01:08 +02002019 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2020
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002021 switch (vif->type) {
2022 case NL80211_IFTYPE_AP:
2023 case NL80211_IFTYPE_ADHOC:
2024 queue = mvm->probe_queue;
2025 break;
2026 case NL80211_IFTYPE_P2P_DEVICE:
2027 queue = mvm->p2p_dev_queue;
2028 break;
2029 default:
2030 WARN(1, "Can't free bcast queue on vif type %d\n",
2031 vif->type);
2032 return;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002033 }
2034
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002035 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
2036 if (iwl_mvm_has_new_tx_api(mvm))
2037 return;
2038
2039 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2040 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002041}
2042
 2043/* Send the FW a request to remove the station from its internal data
2044 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02002045int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002046{
Johannes Berg013290a2014-08-04 13:38:48 +02002047 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002048 int ret;
2049
2050 lockdep_assert_held(&mvm->mutex);
2051
Liad Kaufmandf88c082016-11-24 15:31:00 +02002052 if (iwl_mvm_is_dqa_supported(mvm))
2053 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2054
Johannes Berg013290a2014-08-04 13:38:48 +02002055 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002056 if (ret)
2057 IWL_WARN(mvm, "Failed sending remove station\n");
2058 return ret;
2059}
2060
Johannes Berg013290a2014-08-04 13:38:48 +02002061int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2062{
2063 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Liad Kaufmande24f632015-08-04 15:19:18 +03002064 u32 qmask = 0;
Johannes Berg013290a2014-08-04 13:38:48 +02002065
2066 lockdep_assert_held(&mvm->mutex);
2067
Liad Kaufmandf88c082016-11-24 15:31:00 +02002068 if (!iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmande24f632015-08-04 15:19:18 +03002069 qmask = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002070
Liad Kaufmande24f632015-08-04 15:19:18 +03002071 /*
2072 * The firmware defines the TFD queue mask to only be relevant
2073 * for *unicast* queues, so the multicast (CAB) queue shouldn't
Liad Kaufmandf88c082016-11-24 15:31:00 +02002074 * be included. This only happens in NL80211_IFTYPE_AP vif type,
2075 * so the next line will only have an effect there.
Liad Kaufmande24f632015-08-04 15:19:18 +03002076 */
Johannes Berg013290a2014-08-04 13:38:48 +02002077 qmask &= ~BIT(vif->cab_queue);
Liad Kaufmande24f632015-08-04 15:19:18 +03002078 }
2079
Johannes Berg013290a2014-08-04 13:38:48 +02002080 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
Sara Sharonced19f22017-02-06 19:09:32 +02002081 ieee80211_vif_type_p2p(vif),
2082 IWL_STA_GENERAL_PURPOSE);
Johannes Berg013290a2014-08-04 13:38:48 +02002083}
2084
Johannes Berg8ca151b2013-01-24 14:25:36 +01002085/* Allocate a new station entry for the broadcast station to the given vif,
2086 * and send it to the FW.
2087 * Note that each P2P mac should have its own broadcast station.
2088 *
2089 * @mvm: the mvm component
2090 * @vif: the interface to which the broadcast station is added
2091 * @bsta: the broadcast station to add. */
Johannes Berg013290a2014-08-04 13:38:48 +02002092int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002093{
2094 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002095 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002096 int ret;
2097
2098 lockdep_assert_held(&mvm->mutex);
2099
Johannes Berg013290a2014-08-04 13:38:48 +02002100 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002101 if (ret)
2102 return ret;
2103
Johannes Berg013290a2014-08-04 13:38:48 +02002104 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002105
2106 if (ret)
2107 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02002108
Johannes Berg8ca151b2013-01-24 14:25:36 +01002109 return ret;
2110}
2111
Johannes Berg013290a2014-08-04 13:38:48 +02002112void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2113{
2114 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2115
2116 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2117}
2118
Johannes Berg8ca151b2013-01-24 14:25:36 +01002119/*
 2120 * Send the FW a request to remove the station from its internal data
2121 * structures, and in addition remove it from the local data structure.
2122 */
Johannes Berg013290a2014-08-04 13:38:48 +02002123int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002124{
2125 int ret;
2126
2127 lockdep_assert_held(&mvm->mutex);
2128
Johannes Berg013290a2014-08-04 13:38:48 +02002129 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002130
Johannes Berg013290a2014-08-04 13:38:48 +02002131 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2132
Johannes Berg8ca151b2013-01-24 14:25:36 +01002133 return ret;
2134}
2135
Sara Sharon26d6c162017-01-03 12:00:19 +02002136/*
2137 * Allocate a new station entry for the multicast station to the given vif,
2138 * and send it to the FW.
2139 * Note that each AP/GO mac should have its own multicast station.
2140 *
2141 * @mvm: the mvm component
2142 * @vif: the interface to which the multicast station is added
2143 */
2144int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2145{
2146 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2147 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2148 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2149 const u8 *maddr = _maddr;
2150 struct iwl_trans_txq_scd_cfg cfg = {
2151 .fifo = IWL_MVM_TX_FIFO_MCAST,
2152 .sta_id = msta->sta_id,
2153 .tid = IWL_MAX_TID_COUNT,
2154 .aggregate = false,
2155 .frame_limit = IWL_FRAME_LIMIT,
2156 };
2157 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2158 int ret;
2159
2160 lockdep_assert_held(&mvm->mutex);
2161
2162 if (!iwl_mvm_is_dqa_supported(mvm))
2163 return 0;
2164
Liad Kaufmanee48b722017-03-21 17:13:16 +02002165 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2166 vif->type != NL80211_IFTYPE_ADHOC))
Sara Sharon26d6c162017-01-03 12:00:19 +02002167 return -ENOTSUPP;
2168
Sara Sharonced19f22017-02-06 19:09:32 +02002169 /*
2170 * While in previous FWs we had to exclude cab queue from TFD queue
 2171	 * mask, now it is needed like any other queue.
2172 */
2173 if (!iwl_mvm_has_new_tx_api(mvm) &&
2174 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2175 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2176 &cfg, timeout);
2177 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2178 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002179 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2180 mvmvif->id, mvmvif->color);
2181 if (ret) {
2182 iwl_mvm_dealloc_int_sta(mvm, msta);
2183 return ret;
2184 }
2185
2186 /*
2187 * Enable cab queue after the ADD_STA command is sent.
2188 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
Sara Sharonced19f22017-02-06 19:09:32 +02002189 * command with unknown station id, and for FW that doesn't support
2190 * station API since the cab queue is not included in the
2191 * tfd_queue_mask.
Sara Sharon26d6c162017-01-03 12:00:19 +02002192 */
Sara Sharon310181e2017-01-17 14:27:48 +02002193 if (iwl_mvm_has_new_tx_api(mvm)) {
2194 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2195 msta->sta_id,
2196 IWL_MAX_TID_COUNT,
2197 timeout);
Sara Sharone2af3fa2017-02-22 19:35:10 +02002198 mvmvif->cab_queue = queue;
Sara Sharonced19f22017-02-06 19:09:32 +02002199 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2200 IWL_UCODE_TLV_API_STA_TYPE)) {
Liad Kaufmanee48b722017-03-21 17:13:16 +02002201 /*
2202 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2203 * invalid, so make sure we use the queue we want.
2204 * Note that this is done here as we want to avoid making DQA
2205 * changes in mac80211 layer.
2206 */
2207 if (vif->type == NL80211_IFTYPE_ADHOC) {
2208 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2209 mvmvif->cab_queue = vif->cab_queue;
2210 }
Sara Sharon310181e2017-01-17 14:27:48 +02002211 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2212 &cfg, timeout);
2213 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002214
2215 return 0;
2216}
2217
2218/*
 2219 * Send the FW a request to remove the station from its internal data
2220 * structures, and in addition remove it from the local data structure.
2221 */
2222int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2223{
2224 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2225 int ret;
2226
2227 lockdep_assert_held(&mvm->mutex);
2228
2229 if (!iwl_mvm_is_dqa_supported(mvm))
2230 return 0;
2231
Sara Sharond49394a2017-03-05 13:01:08 +02002232 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2233
Sara Sharone2af3fa2017-02-22 19:35:10 +02002234 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
Sara Sharon26d6c162017-01-03 12:00:19 +02002235 IWL_MAX_TID_COUNT, 0);
2236
2237 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2238 if (ret)
2239 IWL_WARN(mvm, "Failed sending remove station\n");
2240
2241 return ret;
2242}
2243
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002244#define IWL_MAX_RX_BA_SESSIONS 16
2245
Sara Sharonb915c102016-03-23 16:32:02 +02002246static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02002247{
Sara Sharonb915c102016-03-23 16:32:02 +02002248 struct iwl_mvm_delba_notif notif = {
2249 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2250 .metadata.sync = 1,
2251 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02002252 };
Sara Sharonb915c102016-03-23 16:32:02 +02002253 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2254};
Sara Sharon10b2b202016-03-20 16:23:41 +02002255
Sara Sharonb915c102016-03-23 16:32:02 +02002256static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2257 struct iwl_mvm_baid_data *data)
2258{
2259 int i;
2260
2261 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2262
2263 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2264 int j;
2265 struct iwl_mvm_reorder_buffer *reorder_buf =
2266 &data->reorder_buf[i];
2267
Sara Sharon06904052016-02-28 20:28:17 +02002268 spin_lock_bh(&reorder_buf->lock);
2269 if (likely(!reorder_buf->num_stored)) {
2270 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02002271 continue;
Sara Sharon06904052016-02-28 20:28:17 +02002272 }
Sara Sharonb915c102016-03-23 16:32:02 +02002273
2274 /*
2275 * This shouldn't happen in regular DELBA since the internal
2276 * delBA notification should trigger a release of all frames in
2277 * the reorder buffer.
2278 */
2279 WARN_ON(1);
2280
2281 for (j = 0; j < reorder_buf->buf_size; j++)
2282 __skb_queue_purge(&reorder_buf->entries[j]);
Sara Sharon06904052016-02-28 20:28:17 +02002283 /*
 2284		 * Prevent timer re-arm. This prevents a very far-fetched case
2285 * where we timed out on the notification. There may be prior
2286 * RX frames pending in the RX queue before the notification
2287 * that might get processed between now and the actual deletion
2288 * and we would re-arm the timer although we are deleting the
2289 * reorder buffer.
2290 */
2291 reorder_buf->removed = true;
2292 spin_unlock_bh(&reorder_buf->lock);
2293 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02002294 }
2295}
2296
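/*
 * Initialize one RX reorder buffer per RX queue for a new BA session:
 * starting SSN, window size, expiry timer and the per-slot skb queues.
 */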
2297static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2298 u32 sta_id,
2299 struct iwl_mvm_baid_data *data,
2300 u16 ssn, u8 buf_size)
2301{
2302 int i;
2303
2304 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2305 struct iwl_mvm_reorder_buffer *reorder_buf =
2306 &data->reorder_buf[i];
2307 int j;
2308
2309 reorder_buf->num_stored = 0;
2310 reorder_buf->head_sn = ssn;
2311 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02002312 /* rx reorder timer */
2313 reorder_buf->reorder_timer.function =
2314 iwl_mvm_reorder_timer_expired;
2315 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
2316 init_timer(&reorder_buf->reorder_timer);
2317 spin_lock_init(&reorder_buf->lock);
2318 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02002319 reorder_buf->queue = i;
2320 reorder_buf->sta_id = sta_id;
Sara Sharon5d43eab2017-02-02 12:51:39 +02002321 reorder_buf->valid = false;
Sara Sharonb915c102016-03-23 16:32:02 +02002322 for (j = 0; j < reorder_buf->buf_size; j++)
2323 __skb_queue_head_init(&reorder_buf->entries[j]);
2324 }
Sara Sharon10b2b202016-03-20 16:23:41 +02002325}
2326
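/*
 * Start or stop an RX BA (aggregation) session for a TID via ADD_STA.
 * With the new RX API the driver also tracks the session in baid_map
 * and sets up the per-queue reorder buffers; on teardown they are freed
 * again after synchronizing all RX queues.
 */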
Johannes Berg8ca151b2013-01-24 14:25:36 +01002327int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Sara Sharon10b2b202016-03-20 16:23:41 +02002328 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002329{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002330 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002331 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002332 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002333 int ret;
2334 u32 status;
2335
2336 lockdep_assert_held(&mvm->mutex);
2337
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002338 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2339 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2340 return -ENOSPC;
2341 }
2342
Sara Sharon10b2b202016-03-20 16:23:41 +02002343 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2344 /*
2345 * Allocate here so if allocation fails we can bail out early
2346 * before starting the BA session in the firmware
2347 */
Sara Sharonb915c102016-03-23 16:32:02 +02002348 baid_data = kzalloc(sizeof(*baid_data) +
2349 mvm->trans->num_rx_queues *
2350 sizeof(baid_data->reorder_buf[0]),
2351 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002352 if (!baid_data)
2353 return -ENOMEM;
2354 }
2355
Johannes Berg8ca151b2013-01-24 14:25:36 +01002356 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2357 cmd.sta_id = mvm_sta->sta_id;
2358 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002359 if (start) {
2360 cmd.add_immediate_ba_tid = (u8) tid;
2361 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Sara Sharon854c5702016-01-26 13:17:47 +02002362 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002363 } else {
2364 cmd.remove_immediate_ba_tid = (u8) tid;
2365 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002366 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2367 STA_MODIFY_REMOVE_BA_TID;
2368
2369 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002370 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2371 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002372 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002373 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002374 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002375
Sara Sharon837c4da2016-01-07 16:50:45 +02002376 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002377 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002378 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2379 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002380 break;
2381 case ADD_STA_IMMEDIATE_BA_FAILURE:
2382 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2383 ret = -ENOSPC;
2384 break;
2385 default:
2386 ret = -EIO;
2387 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2388 start ? "start" : "stopp", status);
2389 break;
2390 }
2391
Sara Sharon10b2b202016-03-20 16:23:41 +02002392 if (ret)
2393 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002394
Sara Sharon10b2b202016-03-20 16:23:41 +02002395 if (start) {
2396 u8 baid;
2397
2398 mvm->rx_ba_sessions++;
2399
2400 if (!iwl_mvm_has_new_rx_api(mvm))
2401 return 0;
2402
2403 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2404 ret = -EINVAL;
2405 goto out_free;
2406 }
2407 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2408 IWL_ADD_STA_BAID_SHIFT);
2409 baid_data->baid = baid;
2410 baid_data->timeout = timeout;
2411 baid_data->last_rx = jiffies;
Wei Yongjun72c240f2016-07-12 11:40:57 +00002412 setup_timer(&baid_data->session_timer,
2413 iwl_mvm_rx_agg_session_expired,
2414 (unsigned long)&mvm->baid_map[baid]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002415 baid_data->mvm = mvm;
2416 baid_data->tid = tid;
2417 baid_data->sta_id = mvm_sta->sta_id;
2418
2419 mvm_sta->tid_to_baid[tid] = baid;
2420 if (timeout)
2421 mod_timer(&baid_data->session_timer,
2422 TU_TO_EXP_TIME(timeout * 2));
2423
Sara Sharonb915c102016-03-23 16:32:02 +02002424 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2425 baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002426 /*
2427 * protect the BA data with RCU to cover a case where our
2428 * internal RX sync mechanism will timeout (not that it's
2429 * supposed to happen) and we will free the session data while
2430 * RX is being processed in parallel
2431 */
Sara Sharon35263a02016-06-21 12:12:10 +03002432 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2433 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002434 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2435 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002436 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002437 u8 baid = mvm_sta->tid_to_baid[tid];
2438
Sara Sharon60dec522016-06-21 14:14:08 +03002439 if (mvm->rx_ba_sessions > 0)
2440 /* check that restart flow didn't zero the counter */
2441 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002442 if (!iwl_mvm_has_new_rx_api(mvm))
2443 return 0;
2444
2445 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2446 return -EINVAL;
2447
2448 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2449 if (WARN_ON(!baid_data))
2450 return -EINVAL;
2451
2452 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002453 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002454 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002455 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2456 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002457 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002458 }
2459 return 0;
2460
2461out_free:
2462 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002463 return ret;
2464}
2465
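/*
 * Tell the firmware to enable or disable TX aggregation for a TID on
 * the given queue by updating the station's queue mask and
 * tid_disable_tx bitmap through ADD_STA.
 */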
Liad Kaufman9794c642015-08-19 17:34:28 +03002466int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2467 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002468{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002469 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002470 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002471 int ret;
2472 u32 status;
2473
2474 lockdep_assert_held(&mvm->mutex);
2475
2476 if (start) {
2477 mvm_sta->tfd_queue_msk |= BIT(queue);
2478 mvm_sta->tid_disable_agg &= ~BIT(tid);
2479 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002480 /* In DQA-mode the queue isn't removed on agg termination */
2481 if (!iwl_mvm_is_dqa_supported(mvm))
2482 mvm_sta->tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002483 mvm_sta->tid_disable_agg |= BIT(tid);
2484 }
2485
2486 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2487 cmd.sta_id = mvm_sta->sta_id;
2488 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002489 if (!iwl_mvm_has_new_tx_api(mvm))
2490 cmd.modify_mask = STA_MODIFY_QUEUES;
2491 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002492 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2493 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2494
2495 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002496 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2497 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002498 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002499 if (ret)
2500 return ret;
2501
Sara Sharon837c4da2016-01-07 16:50:45 +02002502 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002503 case ADD_STA_SUCCESS:
2504 break;
2505 default:
2506 ret = -EIO;
2507 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2508 start ? "start" : "stopp", status);
2509 break;
2510 }
2511
2512 return ret;
2513}
2514
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002515const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002516 IEEE80211_AC_BE,
2517 IEEE80211_AC_BK,
2518 IEEE80211_AC_BK,
2519 IEEE80211_AC_BE,
2520 IEEE80211_AC_VI,
2521 IEEE80211_AC_VI,
2522 IEEE80211_AC_VO,
2523 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002524 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002525};
2526
Johannes Berg3e56ead2013-02-15 22:23:18 +01002527static const u8 tid_to_ucode_ac[] = {
2528 AC_BE,
2529 AC_BK,
2530 AC_BK,
2531 AC_BE,
2532 AC_VI,
2533 AC_VI,
2534 AC_VO,
2535 AC_VO,
2536};
2537
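/*
 * Start a TX aggregation session for a TID: pick (or reuse, in DQA
 * mode) a TX queue, remember the starting SSN, and either move straight
 * to IWL_AGG_STARTING or wait until the pending frames are reclaimed.
 */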
Johannes Berg8ca151b2013-01-24 14:25:36 +01002538int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2539 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2540{
Johannes Berg5b577a92013-11-14 18:20:04 +01002541 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002542 struct iwl_mvm_tid_data *tid_data;
Liad Kaufmandd321622017-04-05 16:25:11 +03002543 u16 normalized_ssn;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002544 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002545 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002546
2547 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2548 return -EINVAL;
2549
2550 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2551 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
2552 mvmsta->tid_data[tid].state);
2553 return -ENXIO;
2554 }
2555
2556 lockdep_assert_held(&mvm->mutex);
2557
Arik Nemtsovb2492502014-03-13 12:21:50 +02002558 spin_lock_bh(&mvmsta->lock);
2559
2560 /* possible race condition - we entered D0i3 while starting agg */
2561 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2562 spin_unlock_bh(&mvmsta->lock);
2563 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2564 return -EIO;
2565 }
2566
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002567 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002568
Liad Kaufmancf961e12015-08-13 19:16:08 +03002569 /*
2570 * Note the possible cases:
2571 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2572 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2573 * one and mark it as reserved
2574 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
2575 * non-DQA mode, since the TXQ hasn't yet been allocated
Sara Sharon34e10862017-02-23 13:15:07 +02002576 * Don't support case 3 for new TX path as it is not expected to happen
2577 * and aggregation will be offloaded soon anyway
Liad Kaufmancf961e12015-08-13 19:16:08 +03002578 */
2579 txq_id = mvmsta->tid_data[tid].txq_id;
Sara Sharon34e10862017-02-23 13:15:07 +02002580 if (iwl_mvm_has_new_tx_api(mvm)) {
2581 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2582 ret = -ENXIO;
2583 goto release_locks;
2584 }
2585 } else if (iwl_mvm_is_dqa_supported(mvm) &&
2586 unlikely(mvm->queue_info[txq_id].status ==
2587 IWL_MVM_QUEUE_SHARED)) {
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002588 ret = -ENXIO;
2589 IWL_DEBUG_TX_QUEUES(mvm,
2590 "Can't start tid %d agg on shared queue!\n",
2591 tid);
2592 goto release_locks;
2593 } else if (!iwl_mvm_is_dqa_supported(mvm) ||
Liad Kaufmancf961e12015-08-13 19:16:08 +03002594 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002595 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2596 mvm->first_agg_queue,
Liad Kaufmancf961e12015-08-13 19:16:08 +03002597 mvm->last_agg_queue);
2598 if (txq_id < 0) {
2599 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002600 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2601 goto release_locks;
2602 }
Sara Sharon01796ff2016-11-16 17:04:36 +02002603 /*
2604 * TXQ shouldn't be in inactive mode for non-DQA, so getting
2605 * an inactive queue from iwl_mvm_find_free_queue() is
2606 * certainly a bug
2607 */
2608 WARN_ON(mvm->queue_info[txq_id].status ==
2609 IWL_MVM_QUEUE_INACTIVE);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002610
2611 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2612 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002613 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002614
2615 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002616
Liad Kaufmancf961e12015-08-13 19:16:08 +03002617 IWL_DEBUG_TX_QUEUES(mvm,
2618 "AGG for tid %d will be on queue #%d\n",
2619 tid, txq_id);
2620
Johannes Berg8ca151b2013-01-24 14:25:36 +01002621 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002622 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002623 tid_data->txq_id = txq_id;
2624 *ssn = tid_data->ssn;
2625
2626 IWL_DEBUG_TX_QUEUES(mvm,
2627 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2628 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2629 tid_data->next_reclaimed);
2630
Liad Kaufmandd321622017-04-05 16:25:11 +03002631 /*
 2632	 * In A000 HW, the next_reclaimed index is only 8 bits wide, so we
 2633	 * need to align the wrap-around of ssn with it before comparing.
2634 */
2635 normalized_ssn = tid_data->ssn;
2636 if (mvm->trans->cfg->gen2)
2637 normalized_ssn &= 0xff;
2638
2639 if (normalized_ssn == tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002640 tid_data->state = IWL_AGG_STARTING;
2641 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2642 } else {
2643 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2644 }
2645
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002646 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002647 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002648
2649release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002650 spin_unlock(&mvm->queue_info_lock);
2651out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002652 spin_unlock_bh(&mvmsta->lock);
2653
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002654 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002655}
2656
2657int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002658 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2659 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002660{
Johannes Berg5b577a92013-11-14 18:20:04 +01002661 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002662 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002663 unsigned int wdg_timeout =
2664 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002665 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002666 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002667 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002668 u16 ssn;
2669
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002670 struct iwl_trans_txq_scd_cfg cfg = {
2671 .sta_id = mvmsta->sta_id,
2672 .tid = tid,
2673 .frame_limit = buf_size,
2674 .aggregate = true,
2675 };
2676
Eyal Shapiraefed6642014-09-14 15:58:53 +03002677 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2678 != IWL_MAX_TID_COUNT);
2679
Johannes Berg8ca151b2013-01-24 14:25:36 +01002680 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2681
2682 spin_lock_bh(&mvmsta->lock);
2683 ssn = tid_data->ssn;
2684 queue = tid_data->txq_id;
2685 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002686 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002687 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002688 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002689 spin_unlock_bh(&mvmsta->lock);
2690
Sara Sharon34e10862017-02-23 13:15:07 +02002691 if (iwl_mvm_has_new_tx_api(mvm)) {
2692 /*
		 2693		 * If there were no queue, iwl_mvm_sta_tx_agg_start() would have
		 2694		 * failed, so there is no need to check the queue's status here.
2695 */
2696 if (buf_size < mvmsta->max_agg_bufsize)
2697 return -ENOTSUPP;
2698
2699 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2700 if (ret)
2701 return -EIO;
2702 goto out;
2703 }
2704
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002705 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01002706
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002707 spin_lock_bh(&mvm->queue_info_lock);
2708 queue_status = mvm->queue_info[queue].status;
2709 spin_unlock_bh(&mvm->queue_info_lock);
2710
Liad Kaufmancf961e12015-08-13 19:16:08 +03002711 /* In DQA mode, the existing queue might need to be reconfigured */
2712 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002713 /* Maybe there is no need to even alloc a queue... */
2714 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2715 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002716
2717 /*
		 2718		 * Only reconfigure the SCD for the queue if the window size has
		 2719		 * changed from the current one (i.e. become smaller)
2720 */
2721 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2722 /*
2723 * If reconfiguring an existing queue, it first must be
2724 * drained
2725 */
Sara Sharona1a57872017-03-05 11:38:58 +02002726 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2727 BIT(queue));
Liad Kaufmancf961e12015-08-13 19:16:08 +03002728 if (ret) {
2729 IWL_ERR(mvm,
2730 "Error draining queue before reconfig\n");
2731 return ret;
2732 }
2733
2734 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2735 mvmsta->sta_id, tid,
2736 buf_size, ssn);
2737 if (ret) {
2738 IWL_ERR(mvm,
2739 "Error reconfiguring TXQ #%d\n", queue);
2740 return ret;
2741 }
2742 }
2743 }
2744
2745 if (alloc_queue)
2746 iwl_mvm_enable_txq(mvm, queue,
2747 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2748 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03002749
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002750 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2751 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2752 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2753 if (ret)
2754 return -EIO;
2755 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002756
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002757 /* No need to mark as reserved */
2758 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002759 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002760 spin_unlock_bh(&mvm->queue_info_lock);
2761
Sara Sharon34e10862017-02-23 13:15:07 +02002762out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002763 /*
2764 * Even though in theory the peer could have different
2765 * aggregation reorder buffer sizes for different sessions,
2766 * our ucode doesn't allow for that and has a global limit
2767 * for each station. Therefore, use the minimum of all the
2768 * aggregation sessions and our default value.
2769 */
2770 mvmsta->max_agg_bufsize =
2771 min(mvmsta->max_agg_bufsize, buf_size);
2772 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2773
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03002774 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2775 sta->addr, tid);
2776
Eyal Shapira9e680942013-11-09 00:16:16 +02002777 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002778}
2779
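/*
 * Release a queue that was only reserved (never actually enabled) for an
 * aggregation session. Devices using the new TX API have no reservation
 * scheme, so this is a no-op there.
 */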
Sara Sharon34e10862017-02-23 13:15:07 +02002780static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
2781 struct iwl_mvm_sta *mvmsta,
2782 u16 txq_id)
2783{
2784 if (iwl_mvm_has_new_tx_api(mvm))
2785 return;
2786
2787 spin_lock_bh(&mvm->queue_info_lock);
2788 /*
	 2789	 * The TXQ is marked as reserved only if no traffic has come through yet.
	 2790	 * This means no traffic has been sent on this TID (agg'd or not), so
	 2791	 * we no longer have use for the queue. Since it hasn't even been
	 2792	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 2793	 * free.
2794 */
2795 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2796 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2797
2798 spin_unlock_bh(&mvm->queue_info_lock);
2799}
2800
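/*
 * Tear down an aggregation session at mac80211's request. An operational
 * session is switched off and its queue disabled on non-DQA devices (or the
 * tear-down is deferred while frames are still pending there); a session
 * that never got past ADDBA is simply cancelled; any other state is an
 * error.
 */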
Johannes Berg8ca151b2013-01-24 14:25:36 +01002801int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2802 struct ieee80211_sta *sta, u16 tid)
2803{
Johannes Berg5b577a92013-11-14 18:20:04 +01002804 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002805 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2806 u16 txq_id;
2807 int err;
2808
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02002809 /*
2810 * If mac80211 is cleaning its state, then say that we finished since
2811 * our state has been cleared anyway.
2812 */
2813 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2814 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2815 return 0;
2816 }
2817
Johannes Berg8ca151b2013-01-24 14:25:36 +01002818 spin_lock_bh(&mvmsta->lock);
2819
2820 txq_id = tid_data->txq_id;
2821
2822 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2823 mvmsta->sta_id, tid, txq_id, tid_data->state);
2824
Eyal Shapiraefed6642014-09-14 15:58:53 +03002825 mvmsta->agg_tids &= ~BIT(tid);
2826
Sara Sharon34e10862017-02-23 13:15:07 +02002827 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002828
Johannes Berg8ca151b2013-01-24 14:25:36 +01002829 switch (tid_data->state) {
2830 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01002831 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002832
2833 IWL_DEBUG_TX_QUEUES(mvm,
2834 "ssn = %d, next_recl = %d\n",
2835 tid_data->ssn, tid_data->next_reclaimed);
2836
Liad Kaufman664e9682017-04-05 10:35:18 +03002837 /*
2838 * There are still packets for this RA / TID in the HW.
2839 * Not relevant for DQA mode, since there is no need to disable
2840 * the queue.
2841 */
2842 if (!iwl_mvm_is_dqa_supported(mvm) &&
2843 tid_data->ssn != tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002844 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
2845 err = 0;
2846 break;
2847 }
2848
2849 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002850 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002851 spin_unlock_bh(&mvmsta->lock);
2852
2853 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2854
2855 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2856
Liad Kaufmancf961e12015-08-13 19:16:08 +03002857 if (!iwl_mvm_is_dqa_supported(mvm)) {
2858 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2859
2860 iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
2861 }
Johannes Bergf7f89e72014-08-05 15:24:44 +02002862 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002863 case IWL_AGG_STARTING:
2864 case IWL_EMPTYING_HW_QUEUE_ADDBA:
2865 /*
2866 * The agg session has been stopped before it was set up. This
2867 * can happen when the AddBA timer times out for example.
2868 */
2869
2870 /* No barriers since we are under mutex */
2871 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002872
2873 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2874 tid_data->state = IWL_AGG_OFF;
2875 err = 0;
2876 break;
2877 default:
2878 IWL_ERR(mvm,
2879 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2880 mvmsta->sta_id, tid, tid_data->state);
2881 IWL_ERR(mvm,
2882 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2883 err = -EINVAL;
2884 }
2885
2886 spin_unlock_bh(&mvmsta->lock);
2887
2888 return err;
2889}
2890
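/*
 * Like the stop path above, but used when the session must go away
 * immediately: the TID is marked off first so the empty-queue path doesn't
 * call ieee80211_stop_tx_ba_cb, the station is drained, pending frames are
 * flushed and waited for, and only then is aggregation disabled in the
 * firmware.
 */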
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002891int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2892 struct ieee80211_sta *sta, u16 tid)
2893{
Johannes Berg5b577a92013-11-14 18:20:04 +01002894 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002895 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2896 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02002897 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002898
2899 /*
2900 * First set the agg state to OFF to avoid calling
2901 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2902 */
2903 spin_lock_bh(&mvmsta->lock);
2904 txq_id = tid_data->txq_id;
2905 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2906 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02002907 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002908 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002909 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002910 spin_unlock_bh(&mvmsta->lock);
2911
Sara Sharon34e10862017-02-23 13:15:07 +02002912 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002913
Johannes Bergb6658ff2013-07-24 13:55:51 +02002914 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002915 iwl_mvm_drain_sta(mvm, mvmsta, true);
Sara Sharond6d517b2017-03-06 10:16:11 +02002916
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002917 if (iwl_mvm_has_new_tx_api(mvm)) {
2918 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
2919 BIT(tid), 0))
2920 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02002921 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002922 } else {
2923 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
2924 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02002925 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002926 }
Sara Sharond6d517b2017-03-06 10:16:11 +02002927
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002928 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002929
Johannes Bergf7f89e72014-08-05 15:24:44 +02002930 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2931
Liad Kaufmancf961e12015-08-13 19:16:08 +03002932 if (!iwl_mvm_is_dqa_supported(mvm)) {
2933 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2934
2935 iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
2936 tid, 0);
2937 }
Johannes Bergb6658ff2013-07-24 13:55:51 +02002938 }
2939
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002940 return 0;
2941}
2942
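/*
 * Helper for picking a free hardware key slot. A small illustrative example
 * of the bookkeeping (values are made up): with fw_key_deleted = {3, 0, 7}
 * and only offsets 1 and 2 unused, offset 2 wins because it has been free
 * the longest; removing a key later bumps every counter and zeroes the one
 * for the slot that was just freed (see iwl_mvm_remove_sta_key()).
 */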
Johannes Berg8ca151b2013-01-24 14:25:36 +01002943static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2944{
Johannes Berg2dc2a152015-06-16 17:09:18 +02002945 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002946
2947 lockdep_assert_held(&mvm->mutex);
2948
Johannes Berg2dc2a152015-06-16 17:09:18 +02002949 /* Pick the unused key offset with the highest 'deleted'
2950 * counter. Every time a key is deleted, all the counters
2951 * are incremented and the one that was just deleted is
2952 * reset to zero. Thus, the highest counter is the one
2953 * that was deleted longest ago. Pick that one.
2954 */
2955 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2956 if (test_bit(i, mvm->fw_key_table))
2957 continue;
2958 if (mvm->fw_key_deleted[i] > max) {
2959 max = mvm->fw_key_deleted[i];
2960 max_offs = i;
2961 }
2962 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002963
Johannes Berg2dc2a152015-06-16 17:09:18 +02002964 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002965 return STA_KEY_IDX_INVALID;
2966
Johannes Berg2dc2a152015-06-16 17:09:18 +02002967 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002968}
2969
Johannes Berg5f7a1842015-12-11 09:36:10 +01002970static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2971 struct ieee80211_vif *vif,
2972 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002973{
Johannes Berg5b530e92014-12-23 16:00:17 +01002974 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002975
Johannes Berg5f7a1842015-12-11 09:36:10 +01002976 if (sta)
2977 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002978
2979 /*
2980 * The device expects GTKs for station interfaces to be
2981 * installed as GTKs for the AP station. If we have no
2982 * station ID, then use AP's station ID.
2983 */
2984 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02002985 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Avri Altman9513c5e2015-10-19 16:29:11 +02002986 u8 sta_id = mvmvif->ap_sta_id;
2987
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002988 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2989 lockdep_is_held(&mvm->mutex));
2990
Avri Altman9513c5e2015-10-19 16:29:11 +02002991 /*
2992 * It is possible that the 'sta' parameter is NULL,
2993 * for example when a GTK is removed - the sta_id will then
2994 * be the AP ID, and no station was passed by mac80211.
2995 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002996 if (IS_ERR_OR_NULL(sta))
2997 return NULL;
2998
2999 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02003000 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003001
Johannes Berg5f7a1842015-12-11 09:36:10 +01003002 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003003}
3004
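/*
 * Build and send the ADD_STA_KEY command for one key. Depending on whether
 * the firmware advertises IWL_UCODE_TLV_API_TKIP_MIC_KEYS, either the new
 * layout (TX/RX MIC keys and the transmit PN filled in by the driver) or
 * the legacy v1 layout (TKIP IV32 and phase-1 key material) is used; both
 * share the same initial fields, hence the union below.
 */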
3005static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
David Spinadel85aeb582017-03-30 19:43:53 +03003006 u32 sta_id,
Sara Sharon45c458b2016-11-09 15:43:26 +02003007 struct ieee80211_key_conf *key, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003008 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3009 u8 key_offset)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003010{
Sara Sharon45c458b2016-11-09 15:43:26 +02003011 union {
3012 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3013 struct iwl_mvm_add_sta_key_cmd cmd;
3014 } u = {};
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003015 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01003016 int ret;
3017 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003018 u16 keyidx;
Sara Sharon45c458b2016-11-09 15:43:26 +02003019 u64 pn = 0;
3020 int i, size;
3021 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3022 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003023
David Spinadel85aeb582017-03-30 19:43:53 +03003024 if (sta_id == IWL_MVM_INVALID_STA)
3025 return -EINVAL;
3026
Sara Sharon45c458b2016-11-09 15:43:26 +02003027 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
Johannes Berg8ca151b2013-01-24 14:25:36 +01003028 STA_KEY_FLG_KEYID_MSK;
3029 key_flags = cpu_to_le16(keyidx);
3030 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3031
Sara Sharon45c458b2016-11-09 15:43:26 +02003032 switch (key->cipher) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003033 case WLAN_CIPHER_SUITE_TKIP:
3034 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003035 if (new_api) {
3036 memcpy((void *)&u.cmd.tx_mic_key,
3037 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3038 IWL_MIC_KEY_SIZE);
3039
3040 memcpy((void *)&u.cmd.rx_mic_key,
3041 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3042 IWL_MIC_KEY_SIZE);
3043 pn = atomic64_read(&key->tx_pn);
3044
3045 } else {
3046 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3047 for (i = 0; i < 5; i++)
3048 u.cmd_v1.tkip_rx_ttak[i] =
3049 cpu_to_le16(tkip_p1k[i]);
3050 }
3051 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003052 break;
3053 case WLAN_CIPHER_SUITE_CCMP:
3054 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Sara Sharon45c458b2016-11-09 15:43:26 +02003055 memcpy(u.cmd.common.key, key->key, key->keylen);
3056 if (new_api)
3057 pn = atomic64_read(&key->tx_pn);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003058 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01003059 case WLAN_CIPHER_SUITE_WEP104:
3060 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05003061 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01003062 case WLAN_CIPHER_SUITE_WEP40:
3063 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003064 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
Johannes Bergba3943b2014-11-12 23:54:48 +01003065 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03003066 case WLAN_CIPHER_SUITE_GCMP_256:
3067 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3068 /* fall through */
3069 case WLAN_CIPHER_SUITE_GCMP:
3070 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003071 memcpy(u.cmd.common.key, key->key, key->keylen);
3072 if (new_api)
3073 pn = atomic64_read(&key->tx_pn);
Ayala Beker2a53d162016-04-07 16:21:57 +03003074 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003075 default:
Max Stepanove36e5432013-08-27 19:56:13 +03003076 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
Sara Sharon45c458b2016-11-09 15:43:26 +02003077 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003078 }
3079
Johannes Bergba3943b2014-11-12 23:54:48 +01003080 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003081 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3082
Sara Sharon45c458b2016-11-09 15:43:26 +02003083 u.cmd.common.key_offset = key_offset;
3084 u.cmd.common.key_flags = key_flags;
David Spinadel85aeb582017-03-30 19:43:53 +03003085 u.cmd.common.sta_id = sta_id;
Sara Sharon45c458b2016-11-09 15:43:26 +02003086
3087 if (new_api) {
3088 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3089 size = sizeof(u.cmd);
3090 } else {
3091 size = sizeof(u.cmd_v1);
3092 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003093
3094 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003095 if (cmd_flags & CMD_ASYNC)
Sara Sharon45c458b2016-11-09 15:43:26 +02003096 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3097 &u.cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003098 else
Sara Sharon45c458b2016-11-09 15:43:26 +02003099 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3100 &u.cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003101
3102 switch (status) {
3103 case ADD_STA_SUCCESS:
3104 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3105 break;
3106 default:
3107 ret = -EIO;
3108 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3109 break;
3110 }
3111
3112 return ret;
3113}
3114
3115static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3116 struct ieee80211_key_conf *keyconf,
3117 u8 sta_id, bool remove_key)
3118{
3119 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3120
3121 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03003122 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3123 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3124 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3125 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3126 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3127 return -EINVAL;
3128
3129 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3130 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01003131 return -EINVAL;
3132
3133 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3134 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3135
3136 if (remove_key) {
3137 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3138 } else {
3139 struct ieee80211_key_seq seq;
3140 const u8 *pn;
3141
Ayala Bekeraa950522016-06-01 00:28:09 +03003142 switch (keyconf->cipher) {
3143 case WLAN_CIPHER_SUITE_AES_CMAC:
3144 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3145 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03003146 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3147 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3148 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3149 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03003150 default:
3151 return -EINVAL;
3152 }
3153
Ayala Beker8e160ab2016-04-11 11:37:38 +03003154 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3155 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3156 igtk_cmd.ctrl_flags |=
3157 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003158 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3159 pn = seq.aes_cmac.pn;
3160 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3161 ((u64) pn[4] << 8) |
3162 ((u64) pn[3] << 16) |
3163 ((u64) pn[2] << 24) |
3164 ((u64) pn[1] << 32) |
3165 ((u64) pn[0] << 40));
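		/*
		 * i.e. the 6-byte IGTK PN is packed into receive_seq_cnt
		 * with pn[0] as the most significant byte.
		 */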
3166 }
3167
3168 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3169 remove_key ? "removing" : "installing",
3170 igtk_cmd.sta_id);
3171
Ayala Beker8e160ab2016-04-11 11:37:38 +03003172 if (!iwl_mvm_has_new_rx_api(mvm)) {
3173 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3174 .ctrl_flags = igtk_cmd.ctrl_flags,
3175 .key_id = igtk_cmd.key_id,
3176 .sta_id = igtk_cmd.sta_id,
3177 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3178 };
3179
3180 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3181 ARRAY_SIZE(igtk_cmd_v1.igtk));
3182 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3183 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3184 }
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003185 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003186 sizeof(igtk_cmd), &igtk_cmd);
3187}
3188
3189
3190static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3191 struct ieee80211_vif *vif,
3192 struct ieee80211_sta *sta)
3193{
Johannes Berg5b530e92014-12-23 16:00:17 +01003194 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003195
3196 if (sta)
3197 return sta->addr;
3198
3199 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003200 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003201 u8 sta_id = mvmvif->ap_sta_id;
3202 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3203 lockdep_is_held(&mvm->mutex));
3204 return sta->addr;
3205 }
3206
3207
3208 return NULL;
3209}
3210
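/*
 * Program one key for one direction (pairwise or multicast). The target
 * station id is taken from the mac80211 station when there is one, or from
 * the vif's multicast station for AP-mode group keys; TKIP additionally
 * needs the phase-1 key derived from the peer address before the command
 * can be sent.
 */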
Johannes Berg2f6319d2014-11-12 23:39:56 +01003211static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3212 struct ieee80211_vif *vif,
3213 struct ieee80211_sta *sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01003214 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003215 u8 key_offset,
Johannes Bergba3943b2014-11-12 23:54:48 +01003216 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003217{
Johannes Berg8ca151b2013-01-24 14:25:36 +01003218 int ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003219 const u8 *addr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003220 struct ieee80211_key_seq seq;
3221 u16 p1k[5];
David Spinadel85aeb582017-03-30 19:43:53 +03003222 u32 sta_id;
3223
3224 if (sta) {
3225 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3226
3227 sta_id = mvm_sta->sta_id;
3228 } else if (vif->type == NL80211_IFTYPE_AP &&
3229 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3230 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3231
3232 sta_id = mvmvif->mcast_sta.sta_id;
3233 } else {
3234 IWL_ERR(mvm, "Failed to find station id\n");
3235 return -EINVAL;
3236 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003237
Johannes Berg8ca151b2013-01-24 14:25:36 +01003238 switch (keyconf->cipher) {
3239 case WLAN_CIPHER_SUITE_TKIP:
David Spinadel85aeb582017-03-30 19:43:53 +03003240 if (vif->type == NL80211_IFTYPE_AP) {
3241 ret = -EINVAL;
3242 break;
3243 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003244 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3245 /* get phase 1 key from mac80211 */
3246 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3247 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
David Spinadel85aeb582017-03-30 19:43:53 +03003248 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003249 seq.tkip.iv32, p1k, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003250 break;
3251 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergba3943b2014-11-12 23:54:48 +01003252 case WLAN_CIPHER_SUITE_WEP40:
3253 case WLAN_CIPHER_SUITE_WEP104:
Ayala Beker2a53d162016-04-07 16:21:57 +03003254 case WLAN_CIPHER_SUITE_GCMP:
3255 case WLAN_CIPHER_SUITE_GCMP_256:
David Spinadel85aeb582017-03-30 19:43:53 +03003256 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003257 0, NULL, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003258 break;
3259 default:
David Spinadel85aeb582017-03-30 19:43:53 +03003260 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003261 0, NULL, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003262 }
3263
Johannes Berg8ca151b2013-01-24 14:25:36 +01003264 return ret;
3265}
3266
Johannes Berg2f6319d2014-11-12 23:39:56 +01003267static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
Johannes Bergba3943b2014-11-12 23:54:48 +01003268 struct ieee80211_key_conf *keyconf,
3269 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003270{
Sara Sharon45c458b2016-11-09 15:43:26 +02003271 union {
3272 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3273 struct iwl_mvm_add_sta_key_cmd cmd;
3274 } u = {};
3275 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3276 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003277 __le16 key_flags;
Sara Sharon45c458b2016-11-09 15:43:26 +02003278 int ret, size;
Johannes Berg79920742014-11-03 15:43:04 +01003279 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003280
David Spinadel85aeb582017-03-30 19:43:53 +03003281 if (sta_id == IWL_MVM_INVALID_STA)
3282 return -EINVAL;
3283
Emmanuel Grumbach8115efb2013-02-05 10:08:35 +02003284 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3285 STA_KEY_FLG_KEYID_MSK);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003286 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3287 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3288
Johannes Bergba3943b2014-11-12 23:54:48 +01003289 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003290 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3291
Sara Sharon45c458b2016-11-09 15:43:26 +02003292 /*
3293 * The fields assigned here are in the same location at the start
3294 * of the command, so we can do this union trick.
3295 */
3296 u.cmd.common.key_flags = key_flags;
3297 u.cmd.common.key_offset = keyconf->hw_key_idx;
3298 u.cmd.common.sta_id = sta_id;
3299
3300 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003301
Johannes Berg8ca151b2013-01-24 14:25:36 +01003302 status = ADD_STA_SUCCESS;
Sara Sharon45c458b2016-11-09 15:43:26 +02003303 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3304 &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003305
3306 switch (status) {
3307 case ADD_STA_SUCCESS:
3308 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3309 break;
3310 default:
3311 ret = -EIO;
3312 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3313 break;
3314 }
3315
3316 return ret;
3317}
3318
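/*
 * Top-level key installation entry point. IGTK-type ciphers are routed to
 * the dedicated MGMT_MCAST_KEY path; everything else gets a firmware key
 * offset (reused across HW restart via keyconf->hw_key_idx) and is uploaded
 * with __iwl_mvm_set_sta_key(). WEP keys are uploaded twice so that the
 * same slot serves both unicast and multicast frames.
 */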
Johannes Berg2f6319d2014-11-12 23:39:56 +01003319int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3320 struct ieee80211_vif *vif,
3321 struct ieee80211_sta *sta,
3322 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003323 u8 key_offset)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003324{
Johannes Bergba3943b2014-11-12 23:54:48 +01003325 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003326 struct iwl_mvm_sta *mvm_sta;
David Spinadel85aeb582017-03-30 19:43:53 +03003327 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003328 int ret;
Matti Gottlieb11828db2015-06-01 15:15:11 +03003329 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
Johannes Berg2f6319d2014-11-12 23:39:56 +01003330
3331 lockdep_assert_held(&mvm->mutex);
3332
David Spinadel85aeb582017-03-30 19:43:53 +03003333 if (vif->type != NL80211_IFTYPE_AP ||
3334 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3335 /* Get the station id from the mvm local station table */
3336 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3337 if (!mvm_sta) {
3338 IWL_ERR(mvm, "Failed to find station\n");
Johannes Berg2f6319d2014-11-12 23:39:56 +01003339 return -EINVAL;
3340 }
David Spinadel85aeb582017-03-30 19:43:53 +03003341 sta_id = mvm_sta->sta_id;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003342
David Spinadel85aeb582017-03-30 19:43:53 +03003343 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3344 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3345 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3346 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
3347 false);
3348 goto end;
3349 }
3350
3351 /*
3352 * It is possible that the 'sta' parameter is NULL, and thus
3353 * there is a need to retrieve the sta from the local station
3354 * table.
3355 */
3356 if (!sta) {
3357 sta = rcu_dereference_protected(
3358 mvm->fw_id_to_mac_id[sta_id],
3359 lockdep_is_held(&mvm->mutex));
3360 if (IS_ERR_OR_NULL(sta)) {
3361 IWL_ERR(mvm, "Invalid station id\n");
3362 return -EINVAL;
3363 }
3364 }
3365
3366 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3367 return -EINVAL;
3368 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003369
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003370 /* If the key_offset is not pre-assigned, we need to find a
3371 * new offset to use. In normal cases, the offset is not
3372 * pre-assigned, but during HW_RESTART we want to reuse the
3373 * same indices, so we pass them when this function is called.
3374 *
	 3375	 * In D3 entry, we need to hardcode the indices (because the
3376 * firmware hardcodes the PTK offset to 0). In this case, we
3377 * need to make sure we don't overwrite the hw_key_idx in the
3378 * keyconf structure, because otherwise we cannot configure
3379 * the original ones back when resuming.
3380 */
3381 if (key_offset == STA_KEY_IDX_INVALID) {
3382 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3383 if (key_offset == STA_KEY_IDX_INVALID)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003384 return -ENOSPC;
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003385 keyconf->hw_key_idx = key_offset;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003386 }
3387
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003388 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003389 if (ret)
Johannes Bergba3943b2014-11-12 23:54:48 +01003390 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003391
3392 /*
3393 * For WEP, the same key is used for multicast and unicast. Upload it
3394 * again, using the same key offset, and now pointing the other one
3395 * to the same key slot (offset).
3396 * If this fails, remove the original as well.
3397 */
David Spinadel85aeb582017-03-30 19:43:53 +03003398 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3399 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3400 sta) {
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003401 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3402 key_offset, !mcast);
Johannes Bergba3943b2014-11-12 23:54:48 +01003403 if (ret) {
Johannes Bergba3943b2014-11-12 23:54:48 +01003404 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003405 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003406 }
3407 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003408
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003409 __set_bit(key_offset, mvm->fw_key_table);
3410
Johannes Berg2f6319d2014-11-12 23:39:56 +01003411end:
3412 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3413 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
Matti Gottlieb11828db2015-06-01 15:15:11 +03003414 sta ? sta->addr : zero_addr, ret);
Johannes Berg2f6319d2014-11-12 23:39:56 +01003415 return ret;
3416}
3417
3418int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3419 struct ieee80211_vif *vif,
3420 struct ieee80211_sta *sta,
3421 struct ieee80211_key_conf *keyconf)
3422{
Johannes Bergba3943b2014-11-12 23:54:48 +01003423 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003424 struct iwl_mvm_sta *mvm_sta;
Sara Sharon0ae98812017-01-04 14:53:58 +02003425 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2dc2a152015-06-16 17:09:18 +02003426 int ret, i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003427
3428 lockdep_assert_held(&mvm->mutex);
3429
Johannes Berg5f7a1842015-12-11 09:36:10 +01003430 /* Get the station from the mvm local station table */
3431 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
Luca Coelho71793b7d2017-03-30 12:04:47 +03003432 if (mvm_sta)
3433 sta_id = mvm_sta->sta_id;
David Spinadel85aeb582017-03-30 19:43:53 +03003434 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3435 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3436
Johannes Berg2f6319d2014-11-12 23:39:56 +01003437
3438 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3439 keyconf->keyidx, sta_id);
3440
Luca Coelho71793b7d2017-03-30 12:04:47 +03003441 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3442 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3443 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
Johannes Berg2f6319d2014-11-12 23:39:56 +01003444 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3445
3446 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3447 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3448 keyconf->hw_key_idx);
3449 return -ENOENT;
3450 }
3451
Johannes Berg2dc2a152015-06-16 17:09:18 +02003452 /* track which key was deleted last */
3453 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3454 if (mvm->fw_key_deleted[i] < U8_MAX)
3455 mvm->fw_key_deleted[i]++;
3456 }
3457 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3458
David Spinadel85aeb582017-03-30 19:43:53 +03003459 if (sta && !mvm_sta) {
Johannes Berg2f6319d2014-11-12 23:39:56 +01003460 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3461 return 0;
3462 }
3463
Johannes Bergba3943b2014-11-12 23:54:48 +01003464 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3465 if (ret)
3466 return ret;
3467
3468 /* delete WEP key twice to get rid of (now useless) offset */
3469 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3470 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3471 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3472
3473 return ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003474}
3475
Johannes Berg8ca151b2013-01-24 14:25:36 +01003476void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3477 struct ieee80211_vif *vif,
3478 struct ieee80211_key_conf *keyconf,
3479 struct ieee80211_sta *sta, u32 iv32,
3480 u16 *phase1key)
3481{
Beni Levc3eb5362013-02-06 17:22:18 +02003482 struct iwl_mvm_sta *mvm_sta;
Johannes Bergba3943b2014-11-12 23:54:48 +01003483 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003484
Beni Levc3eb5362013-02-06 17:22:18 +02003485 rcu_read_lock();
3486
Johannes Berg5f7a1842015-12-11 09:36:10 +01003487 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3488 if (WARN_ON_ONCE(!mvm_sta))
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003489 goto unlock;
David Spinadel85aeb582017-03-30 19:43:53 +03003490 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003491 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003492
3493 unlock:
Beni Levc3eb5362013-02-06 17:22:18 +02003494 rcu_read_unlock();
Johannes Berg8ca151b2013-01-24 14:25:36 +01003495}
3496
Johannes Berg9cc40712013-02-15 22:47:48 +01003497void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3498 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003499{
Johannes Berg5b577a92013-11-14 18:20:04 +01003500 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003501 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003502 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003503 .sta_id = mvmsta->sta_id,
Emmanuel Grumbach5af01772013-06-09 12:59:24 +03003504 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
Johannes Berg9cc40712013-02-15 22:47:48 +01003505 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003506 };
3507 int ret;
3508
Sara Sharon854c5702016-01-26 13:17:47 +02003509 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3510 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003511 if (ret)
3512 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3513}
3514
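/*
 * Tell the firmware how many frames it may release for a sleeping client
 * (PS-Poll or uAPSD service period). When all the frames come from a single
 * station queue, the count is clamped to what is actually queued on the
 * released TIDs and MOREDATA is set if more than 'cnt' frames remain; the
 * Tx queues are blocked until the firmware acknowledges the new sleep
 * count.
 */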
Johannes Berg9cc40712013-02-15 22:47:48 +01003515void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3516 struct ieee80211_sta *sta,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003517 enum ieee80211_frame_release_type reason,
Johannes Berg3e56ead2013-02-15 22:23:18 +01003518 u16 cnt, u16 tids, bool more_data,
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003519 bool single_sta_queue)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003520{
Johannes Berg5b577a92013-11-14 18:20:04 +01003521 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003522 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003523 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003524 .sta_id = mvmsta->sta_id,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003525 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3526 .sleep_tx_count = cpu_to_le16(cnt),
Johannes Berg9cc40712013-02-15 22:47:48 +01003527 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003528 };
Johannes Berg3e56ead2013-02-15 22:23:18 +01003529 int tid, ret;
3530 unsigned long _tids = tids;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003531
Johannes Berg3e56ead2013-02-15 22:23:18 +01003532 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3533 * Note that this field is reserved and unused by firmware not
3534 * supporting GO uAPSD, so it's safe to always do this.
3535 */
3536 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3537 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3538
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003539	/* If we're releasing frames from aggregation or DQA queues, check
3540	 * whether all the queues that we're releasing frames from, combined, have:
Johannes Berg3e56ead2013-02-15 22:23:18 +01003541 * - more frames than the service period, in which case more_data
3542 * needs to be set
3543 * - fewer than 'cnt' frames, in which case we need to adjust the
3544 * firmware command (but do that unconditionally)
3545 */
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003546 if (single_sta_queue) {
Johannes Berg3e56ead2013-02-15 22:23:18 +01003547 int remaining = cnt;
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003548 int sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003549
3550 spin_lock_bh(&mvmsta->lock);
3551 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3552 struct iwl_mvm_tid_data *tid_data;
3553 u16 n_queued;
3554
3555 tid_data = &mvmsta->tid_data[tid];
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003556 if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
3557 tid_data->state != IWL_AGG_ON &&
Johannes Berg3e56ead2013-02-15 22:23:18 +01003558 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
3559 "TID %d state is %d\n",
3560 tid, tid_data->state)) {
3561 spin_unlock_bh(&mvmsta->lock);
3562 ieee80211_sta_eosp(sta);
3563 return;
3564 }
3565
Liad Kaufmandd321622017-04-05 16:25:11 +03003566 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003567 if (n_queued > remaining) {
3568 more_data = true;
3569 remaining = 0;
3570 break;
3571 }
3572 remaining -= n_queued;
3573 }
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003574 sleep_tx_count = cnt - remaining;
3575 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3576 mvmsta->sleep_tx_count = sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003577 spin_unlock_bh(&mvmsta->lock);
3578
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003579 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003580 if (WARN_ON(cnt - remaining == 0)) {
3581 ieee80211_sta_eosp(sta);
3582 return;
3583 }
3584 }
3585
3586 /* Note: this is ignored by firmware not supporting GO uAPSD */
3587 if (more_data)
Sara Sharonced19f22017-02-06 19:09:32 +02003588 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003589
3590 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3591 mvmsta->next_status_eosp = true;
Sara Sharonced19f22017-02-06 19:09:32 +02003592 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003593 } else {
Sara Sharonced19f22017-02-06 19:09:32 +02003594 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003595 }
3596
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +02003597 /* block the Tx queues until the FW updated the sleep Tx count */
3598 iwl_trans_block_txq_ptrs(mvm->trans, true);
3599
3600 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3601 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
Sara Sharon854c5702016-01-26 13:17:47 +02003602 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003603 if (ret)
3604 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3605}
Johannes Berg3e56ead2013-02-15 22:23:18 +01003606
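/*
 * Handle the firmware's end-of-service-period notification and forward it
 * to mac80211 for the station in question.
 */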
Johannes Berg04168412015-06-23 21:22:09 +02003607void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3608 struct iwl_rx_cmd_buffer *rxb)
Johannes Berg3e56ead2013-02-15 22:23:18 +01003609{
3610 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3611 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3612 struct ieee80211_sta *sta;
3613 u32 sta_id = le32_to_cpu(notif->sta_id);
3614
3615 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
Johannes Berg04168412015-06-23 21:22:09 +02003616 return;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003617
3618 rcu_read_lock();
3619 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3620 if (!IS_ERR_OR_NULL(sta))
3621 ieee80211_sta_eosp(sta);
3622 rcu_read_unlock();
Johannes Berg3e56ead2013-02-15 22:23:18 +01003623}
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003624
3625void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3626 struct iwl_mvm_sta *mvmsta, bool disable)
3627{
3628 struct iwl_mvm_add_sta_cmd cmd = {
3629 .add_modify = STA_MODE_MODIFY,
3630 .sta_id = mvmsta->sta_id,
3631 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3632 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3633 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3634 };
3635 int ret;
3636
Sara Sharon854c5702016-01-26 13:17:47 +02003637 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3638 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003639 if (ret)
3640 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3641}
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003642
3643void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3644 struct ieee80211_sta *sta,
3645 bool disable)
3646{
3647 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3648
3649 spin_lock_bh(&mvm_sta->lock);
3650
3651 if (mvm_sta->disable_tx == disable) {
3652 spin_unlock_bh(&mvm_sta->lock);
3653 return;
3654 }
3655
3656 mvm_sta->disable_tx = disable;
3657
3658 /*
Sara Sharon0d365ae2015-03-31 12:24:05 +03003659 * Tell mac80211 to start/stop queuing tx for this station,
3660 * but don't stop queuing if there are still pending frames
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003661 * for this station.
3662 */
3663 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
3664 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3665
3666 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3667
3668 spin_unlock_bh(&mvm_sta->lock);
3669}
3670
Sara Sharonced19f22017-02-06 19:09:32 +02003671static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3672 struct iwl_mvm_vif *mvmvif,
3673 struct iwl_mvm_int_sta *sta,
3674 bool disable)
3675{
3676 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3677 struct iwl_mvm_add_sta_cmd cmd = {
3678 .add_modify = STA_MODE_MODIFY,
3679 .sta_id = sta->sta_id,
3680 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3681 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3682 .mac_id_n_color = cpu_to_le32(id),
3683 };
3684 int ret;
3685
3686 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3687 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3688 if (ret)
3689 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3690}
3691
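/*
 * Block or unblock Tx for every station that belongs to the given vif,
 * including the multicast station and (when unblocking) the broadcast
 * station; the firmware itself blocks the broadcast station for the
 * immediate-quiet period.
 */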
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003692void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3693 struct iwl_mvm_vif *mvmvif,
3694 bool disable)
3695{
3696 struct ieee80211_sta *sta;
3697 struct iwl_mvm_sta *mvm_sta;
3698 int i;
3699
3700 lockdep_assert_held(&mvm->mutex);
3701
3702 /* Block/unblock all the stations of the given mvmvif */
Sara Sharon0ae98812017-01-04 14:53:58 +02003703 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003704 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3705 lockdep_is_held(&mvm->mutex));
3706 if (IS_ERR_OR_NULL(sta))
3707 continue;
3708
3709 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3710 if (mvm_sta->mac_id_n_color !=
3711 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3712 continue;
3713
3714 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3715 }
Sara Sharonced19f22017-02-06 19:09:32 +02003716
3717 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3718 return;
3719
3720 /* Need to block/unblock also multicast station */
3721 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3722 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3723 &mvmvif->mcast_sta, disable);
3724
3725 /*
3726 * Only unblock the broadcast station (FW blocks it for immediate
3727 * quiet, not the driver)
3728 */
3729 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3730 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3731 &mvmvif->bcast_sta, disable);
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003732}
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02003733
3734void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3735{
3736 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3737 struct iwl_mvm_sta *mvmsta;
3738
3739 rcu_read_lock();
3740
3741 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3742
3743 if (!WARN_ON(!mvmsta))
3744 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3745
3746 rcu_read_unlock();
3747}
Liad Kaufmandd321622017-04-05 16:25:11 +03003748
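/*
 * Return how many frames are still queued for a TID, as the distance
 * between the driver's last-used sequence number and next_reclaimed
 * (truncated to 8 bits on gen2 devices, matching the hardware index width).
 */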
3749u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3750{
3751 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3752
3753 /*
	 3754	 * In A000 HW, the next_reclaimed index is only 8 bits wide, so we need
	 3755	 * to normalize the wrap-around of the ssn so that we compare matching values.
3756 */
3757 if (mvm->trans->cfg->gen2)
3758 sn &= 0xff;
3759
3760 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
3761}