/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

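	/*
	 * The cases below intentionally fall through: a station capable of a
	 * given bandwidth also gets the FAT enable flags for every narrower
	 * width, so e.g. an 80 MHz station ends up with the 80, 40 and 20 MHz
	 * flags all set.
	 */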
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

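	/*
	 * The maximum Rx A-MPDU size comes from the VHT capability field when
	 * the peer supports VHT, and falls back to the HT A-MPDU factor
	 * otherwise; the MPDU density is always taken from the HT capability.
	 */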
	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

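	/* Mirror the peer's U-APSD-enabled ACs into the trigger ACs */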
	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

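/*
 * Timer callback for Rx BA sessions: if frames arrived within twice the
 * session timeout, re-arm the timer for the remaining time; otherwise tear
 * the session down via the offloaded stop path in mac80211.
 */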
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

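/*
 * Return a bitmap of the TIDs mapped to this queue that currently have an
 * aggregation session open (state IWL_AGG_ON).
 */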
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and it doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Invalidate the STA's TID mappings and collect TIDs with open aggs */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	spin_lock_bh(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock_bh(&mvm->queue_info_lock);

	return disable_agg_tids;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	/*
	 * If the AC is lower than the current one - the FIFO needs to be
	 * redirected to the lowest one of the streams in the queue. Check if
	 * this is needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK has
	 * value 3 and VO has value 0; to check if ac X is lower than ac Y we
	 * need to check whether the numerical value of X is LARGER than that
	 * of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark the queue as shared in the transport if it is shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there for
	 * shared queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

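/*
 * Allocate a TX queue for the given station/TID and enable it in the SCD:
 * management frames try a MGMT queue first, then the station's reserved
 * queue or a free DATA queue is used, and as a last resort an existing
 * queue of a suitable AC is shared.
 */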
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as eligible for aggregation at some point.
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.action = SCD_CFG_DISABLE_QUEUE,
		};
		u8 txq_curr_ac;

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		spin_lock_bh(&mvm->queue_info_lock);
		txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
		cmd.tid = mvm->queue_info[queue].txq_tid;
		spin_unlock_bh(&mvm->queue_info_lock);

		/* Disable the queue */
		if (disable_agg_tids)
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}

		/* If TXQ is allocated to another STA, update removal in FW */
		if (cmd.sta_id != mvmsta->sta_id)
			iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark the queue as shared in the transport if it is shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there for
	 * shared queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

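/*
 * Re-assign the queue's TID "owner" in the SCD after the owning TID was
 * removed: pick any TID still mapped to the queue and update the FW.
 */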
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	s8 sta_id;
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

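/*
 * Unshare a queue that has gone back to serving a single TID: redirect it to
 * the AC of the remaining TID and, if aggregation was on, re-enable it.
 */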
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

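/*
 * Flush the TX frames that were deferred for a station/TID while no queue
 * was allocated: allocate a queue if needed, then hand the frames to the TX
 * path (or drop them if no queue could be allocated).
 */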
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

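/*
 * Reserve a DATA queue for a new station: a BSS client gets the dedicated
 * IWL_MVM_DQA_BSS_CLIENT_QUEUE when it is free, any other station gets a
 * free queue from the DATA range.
 */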
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (or allocated) */
	mvm->queue_info[mvm_sta->reserved_queue].status =
		IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		cfg.tid = i;
		cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
		cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
				 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-mapping sta %d tid %d to queue %d\n",
				    mvm_sta->sta_id, i, txq_id);

		iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
				   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
				   &cfg, wdg_timeout);

		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
	}

	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

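/*
 * Add a station to the FW and driver tables: pick a free sta_id (or reuse
 * the old one on HW restart), initialize per-TID state, reserve a queue in
 * DQA mode, and finally send the ADD_STA command to the firmware.
 */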
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * in which case they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

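/*
 * Set or clear the station's drain flag: while set, the firmware drains the
 * frames still pending for this station instead of holding on to them.
 */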
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver (as a
 * sanity check only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

1389void iwl_mvm_sta_drained_wk(struct work_struct *wk)
1390{
1391 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
1392 u8 sta_id;
1393
1394 /*
1395 * The mutex is needed because of the SYNC cmd, but not only: if the
1396 * work would run concurrently with iwl_mvm_rm_sta, it would run before
1397 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
1398 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
1399 * that later.
1400 */
1401 mutex_lock(&mvm->mutex);
1402
1403 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
1404 int ret;
1405 struct ieee80211_sta *sta =
1406 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1407 lockdep_is_held(&mvm->mutex));
1408
Johannes Berg1ddbbb02013-12-04 22:39:17 +01001409 /*
1410 * This station is in use or RCU-removed; the latter happens in
1411 * managed mode, where mac80211 removes the station before we
1412 * can remove it from firmware (we can only do that after the
1413 * MAC is marked unassociated), and possibly while the deauth
1414 * frame to disconnect from the AP is still queued. Then, the
1415 * station pointer is -ENOENT when the last skb is reclaimed.
1416 */
1417 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001418 continue;
1419
1420 if (PTR_ERR(sta) == -EINVAL) {
1421 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
1422 sta_id);
1423 continue;
1424 }
1425
1426 if (!sta) {
1427 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
1428 sta_id);
1429 continue;
1430 }
1431
1432 WARN_ON(PTR_ERR(sta) != -EBUSY);
1433 /* This station was removed and we waited until it got drained,
1434 * we can now proceed and remove it.
1435 */
1436 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1437 if (ret) {
1438 IWL_ERR(mvm,
1439 "Couldn't remove sta %d after it was drained\n",
1440 sta_id);
1441 continue;
1442 }
Monam Agarwalc531c772014-03-24 00:05:56 +05301443 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001444 clear_bit(sta_id, mvm->sta_drained);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001445
1446 if (mvm->tfd_drained[sta_id]) {
1447 unsigned long i, msk = mvm->tfd_drained[sta_id];
1448
Emmanuel Grumbacha4ca3ed2015-01-20 17:07:10 +02001449 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
Arik Nemtsov06ecdba2015-10-12 14:47:11 +03001450 iwl_mvm_disable_txq(mvm, i, i,
1451 IWL_MAX_TID_COUNT, 0);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001452
1453 mvm->tfd_drained[sta_id] = 0;
1454 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
1455 sta_id, msk);
1456 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001457 }
1458
1459 mutex_unlock(&mvm->mutex);
1460}
1461
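/*
 * Sketch of how a station lands in sta_drained: iwl_mvm_rm_sta() below
 * marks the station and drains it, and the TX response path is then
 * expected to set the bit and kick this worker once the last pending
 * frame is reclaimed (the exact reclaim-side code lives in the TX path,
 * not in this file):
 *
 *	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], ERR_PTR(-EBUSY));
 *	iwl_mvm_drain_sta(mvm, mvm_sta, true);
 *	// ...later, from the reclaim path:
 *	set_bit(sta_id, mvm->sta_drained);
 *	schedule_work(&mvm->sta_drained_wk);
 */
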
Liad Kaufman24afba72015-07-28 18:56:08 +03001462static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1463 struct ieee80211_vif *vif,
1464 struct iwl_mvm_sta *mvm_sta)
1465{
1466 int ac;
1467 int i;
1468
1469 lockdep_assert_held(&mvm->mutex);
1470
1471 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1472 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
1473 continue;
1474
1475 ac = iwl_mvm_tid_to_ac_queue(i);
1476 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1477 vif->hw_queue[ac], i, 0);
1478 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
1479 }
1480}
1481
Johannes Berg8ca151b2013-01-24 14:25:36 +01001482int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1483 struct ieee80211_vif *vif,
1484 struct ieee80211_sta *sta)
1485{
1486 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001487 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001488 int ret;
1489
1490 lockdep_assert_held(&mvm->mutex);
1491
Sara Sharona571f5f2015-12-07 12:50:58 +02001492 if (iwl_mvm_has_new_rx_api(mvm))
1493 kfree(mvm_sta->dup_data);
1494
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001495 if ((vif->type == NL80211_IFTYPE_STATION &&
1496 mvmvif->ap_sta_id == mvm_sta->sta_id) ||
1497	    iwl_mvm_is_dqa_supported(mvm)) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001498 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1499 if (ret)
1500 return ret;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001501 /* flush its queues here since we are freeing mvm_sta */
Luca Coelho5888a402015-10-06 09:54:57 +03001502 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001503 if (ret)
1504 return ret;
1505 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1506 mvm_sta->tfd_queue_msk);
1507 if (ret)
1508 return ret;
1509 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001510
Liad Kaufman24afba72015-07-28 18:56:08 +03001511 /* If DQA is supported - the queues can be disabled now */
Liad Kaufman56214742016-09-22 15:14:08 +03001512 if (iwl_mvm_is_dqa_supported(mvm))
1513 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1514
1515 /* If there is a TXQ still marked as reserved - free it */
1516 if (iwl_mvm_is_dqa_supported(mvm) &&
1517 mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001518 u8 reserved_txq = mvm_sta->reserved_queue;
1519 enum iwl_mvm_queue_status *status;
1520
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001521 /*
1522 * If no traffic has gone through the reserved TXQ - it
1523 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1524 * should be manually marked as free again
1525 */
1526 spin_lock_bh(&mvm->queue_info_lock);
1527 status = &mvm->queue_info[reserved_txq].status;
1528 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1529 (*status != IWL_MVM_QUEUE_FREE),
1530 "sta_id %d reserved txq %d status %d",
1531 mvm_sta->sta_id, reserved_txq, *status)) {
1532 spin_unlock_bh(&mvm->queue_info_lock);
1533 return -EINVAL;
1534 }
1535
1536 *status = IWL_MVM_QUEUE_FREE;
1537 spin_unlock_bh(&mvm->queue_info_lock);
1538 }
1539
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001540 if (vif->type == NL80211_IFTYPE_STATION &&
1541 mvmvif->ap_sta_id == mvm_sta->sta_id) {
1542 /* if associated - we can't remove the AP STA now */
1543 if (vif->bss_conf.assoc)
1544 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001545
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001546 /* unassoc - go ahead - remove the AP STA now */
1547 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
Eliad Peller37577fe2013-12-05 17:19:39 +02001548
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001549 /* clear d0i3_ap_sta_id if no longer relevant */
1550 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
1551 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1552 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001553 }
1554
1555 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001556 * This shouldn't happen - the TDLS channel switch should be canceled
1557 * before the STA is removed.
1558 */
1559 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
1560 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1561 cancel_delayed_work(&mvm->tdls_cs.dwork);
1562 }
1563
1564 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001565 * Make sure that the tx response code sees the station as -EBUSY and
1566 * calls the drain worker.
1567 */
1568 spin_lock_bh(&mvm_sta->lock);
1569 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001570 * There are frames pending on the AC queues for this station.
1571 * We need to wait until all the frames are drained...
1572 */
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001573 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001574 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
1575 ERR_PTR(-EBUSY));
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001576 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001577
1578 /* disable TDLS sta queues on drain complete */
1579 if (sta->tdls) {
1580 mvm->tfd_drained[mvm_sta->sta_id] =
1581 mvm_sta->tfd_queue_msk;
1582 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
1583 mvm_sta->sta_id);
1584 }
1585
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001586 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001587 } else {
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001588 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001589
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001590 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001591 iwl_mvm_tdls_sta_deinit(mvm, sta);
1592
Johannes Berg8ca151b2013-01-24 14:25:36 +01001593 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
Monam Agarwalc531c772014-03-24 00:05:56 +05301594 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001595 }
1596
1597 return ret;
1598}
1599
1600int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1601 struct ieee80211_vif *vif,
1602 u8 sta_id)
1603{
1604 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1605
1606 lockdep_assert_held(&mvm->mutex);
1607
Monam Agarwalc531c772014-03-24 00:05:56 +05301608 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001609 return ret;
1610}
1611
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001612int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1613 struct iwl_mvm_int_sta *sta,
1614 u32 qmask, enum nl80211_iftype iftype)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001615{
1616 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02001617 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001618 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
1619 return -ENOSPC;
1620 }
1621
1622 sta->tfd_queue_msk = qmask;
1623
1624 /* put a non-NULL value so iterating over the stations won't stop */
1625 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1626 return 0;
1627}
1628
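/*
 * Typical pairing for an internal station (sketch mirroring the aux
 * station flow implemented below, with mvm->mutex held):
 *
 *	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta,
 *				       BIT(mvm->aux_queue),
 *				       NL80211_IFTYPE_UNSPECIFIED);
 *	if (!ret)
 *		ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
 *						 MAC_INDEX_AUX, 0);
 *	if (ret)
 *		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
 */
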
Johannes Berg712b24a2014-08-04 14:14:14 +02001629static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
1630 struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001631{
Monam Agarwalc531c772014-03-24 00:05:56 +05301632 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001633 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1634 sta->sta_id = IWL_MVM_STATION_COUNT;
1635}
1636
1637static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1638 struct iwl_mvm_int_sta *sta,
1639 const u8 *addr,
1640 u16 mac_id, u16 color)
1641{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001642 struct iwl_mvm_add_sta_cmd cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001643 int ret;
1644 u32 status;
1645
1646 lockdep_assert_held(&mvm->mutex);
1647
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001648 memset(&cmd, 0, sizeof(cmd));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001649 cmd.sta_id = sta->sta_id;
1650 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1651 color));
1652
1653 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
Liad Kaufmancf0cda12015-09-24 10:44:12 +02001654 cmd.tid_disable_tx = cpu_to_le16(0xffff);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001655
1656 if (addr)
1657 memcpy(cmd.addr, addr, ETH_ALEN);
1658
Sara Sharon854c5702016-01-26 13:17:47 +02001659 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1660 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001661 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001662 if (ret)
1663 return ret;
1664
Sara Sharon837c4da2016-01-07 16:50:45 +02001665 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001666 case ADD_STA_SUCCESS:
1667 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1668 return 0;
1669 default:
1670 ret = -EIO;
1671 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1672 status);
1673 break;
1674 }
1675 return ret;
1676}
1677
1678int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1679{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001680 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1681 mvm->cfg->base_params->wd_timeout :
1682 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001683 int ret;
1684
1685 lockdep_assert_held(&mvm->mutex);
1686
Ariej Marjieh7da91b02014-07-07 12:09:40 +03001687 /* Map Aux queue to fifo - needs to happen before adding Aux station */
Liad Kaufman28d07932015-09-01 16:36:25 +03001688 if (!iwl_mvm_is_dqa_supported(mvm))
1689 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1690 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
Ariej Marjieh7da91b02014-07-07 12:09:40 +03001691
1692 /* Allocate aux station and assign to it the aux queue */
1693 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Eliad Pellerb92e6612014-01-23 17:58:23 +02001694 NL80211_IFTYPE_UNSPECIFIED);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001695 if (ret)
1696 return ret;
1697
Liad Kaufman28d07932015-09-01 16:36:25 +03001698 if (iwl_mvm_is_dqa_supported(mvm)) {
1699 struct iwl_trans_txq_scd_cfg cfg = {
1700 .fifo = IWL_MVM_TX_FIFO_MCAST,
1701 .sta_id = mvm->aux_sta.sta_id,
1702 .tid = IWL_MAX_TID_COUNT,
1703 .aggregate = false,
1704 .frame_limit = IWL_FRAME_LIMIT,
1705 };
1706
1707 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1708 wdg_timeout);
1709 }
1710
Johannes Berg8ca151b2013-01-24 14:25:36 +01001711 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1712 MAC_INDEX_AUX, 0);
1713
1714 if (ret)
1715 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1716 return ret;
1717}
1718
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001719int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1720{
1721 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1722
1723 lockdep_assert_held(&mvm->mutex);
1724 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1725 mvmvif->id, 0);
1726}
1727
1728int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1729{
1730 int ret;
1731
1732 lockdep_assert_held(&mvm->mutex);
1733
1734 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1735 if (ret)
1736 IWL_WARN(mvm, "Failed sending remove station\n");
1737
1738 return ret;
1739}
1740
1741void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1742{
1743 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1744}
1745
Johannes Berg712b24a2014-08-04 14:14:14 +02001746void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1747{
1748 lockdep_assert_held(&mvm->mutex);
1749
1750 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1751}
1752
Johannes Berg8ca151b2013-01-24 14:25:36 +01001753/*
1754 * Send the add station command for the vif's broadcast station.
1755 * Assumes that the station was already allocated.
1756 *
1757 * @mvm: the mvm component
1758 * @vif: the interface to which the broadcast station is added
1760 */
Johannes Berg013290a2014-08-04 13:38:48 +02001761int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001762{
1763 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001764 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001765 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001766 const u8 *baddr = _baddr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001767
1768 lockdep_assert_held(&mvm->mutex);
1769
Liad Kaufmande24f632015-08-04 15:19:18 +03001770 if (iwl_mvm_is_dqa_supported(mvm)) {
1771 struct iwl_trans_txq_scd_cfg cfg = {
1772 .fifo = IWL_MVM_TX_FIFO_VO,
1773 .sta_id = mvmvif->bcast_sta.sta_id,
1774 .tid = IWL_MAX_TID_COUNT,
1775 .aggregate = false,
1776 .frame_limit = IWL_FRAME_LIMIT,
1777 };
1778 unsigned int wdg_timeout =
1779 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1780 int queue;
1781
1782 if ((vif->type == NL80211_IFTYPE_AP) &&
1783 (mvmvif->bcast_sta.tfd_queue_msk &
1784 BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
1785 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
Liad Kaufman4c965132015-08-09 19:26:56 +03001786 else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
1787 (mvmvif->bcast_sta.tfd_queue_msk &
1788 BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
1789 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
Liad Kaufmande24f632015-08-04 15:19:18 +03001790 else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
1791 return -EINVAL;
1792
1793 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
1794 wdg_timeout);
1795 }
1796
Johannes Berg5023d962013-07-31 14:07:43 +02001797 if (vif->type == NL80211_IFTYPE_ADHOC)
1798 baddr = vif->bss_conf.bssid;
1799
Johannes Berg8ca151b2013-01-24 14:25:36 +01001800 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
1801 return -ENOSPC;
1802
1803 return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1804 mvmvif->id, mvmvif->color);
1805}
1806
1807/* Send the FW a request to remove the station from its internal data
1808 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02001809int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001810{
Johannes Berg013290a2014-08-04 13:38:48 +02001811 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001812 int ret;
1813
1814 lockdep_assert_held(&mvm->mutex);
1815
Johannes Berg013290a2014-08-04 13:38:48 +02001816 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001817 if (ret)
1818 IWL_WARN(mvm, "Failed sending remove station\n");
1819 return ret;
1820}
1821
Johannes Berg013290a2014-08-04 13:38:48 +02001822int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1823{
1824 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Liad Kaufmande24f632015-08-04 15:19:18 +03001825 u32 qmask = 0;
Johannes Berg013290a2014-08-04 13:38:48 +02001826
1827 lockdep_assert_held(&mvm->mutex);
1828
Liad Kaufmande24f632015-08-04 15:19:18 +03001829 if (!iwl_mvm_is_dqa_supported(mvm))
1830 qmask = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001831
Liad Kaufmande24f632015-08-04 15:19:18 +03001832 if (vif->type == NL80211_IFTYPE_AP) {
1833 /*
1834 * The firmware defines the TFD queue mask to only be relevant
1835 * for *unicast* queues, so the multicast (CAB) queue shouldn't
1836 * be included.
1837 */
Johannes Berg013290a2014-08-04 13:38:48 +02001838 qmask &= ~BIT(vif->cab_queue);
1839
Liad Kaufmande24f632015-08-04 15:19:18 +03001840 if (iwl_mvm_is_dqa_supported(mvm))
1841 qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
Liad Kaufman4c965132015-08-09 19:26:56 +03001842 } else if (iwl_mvm_is_dqa_supported(mvm) &&
1843 vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1844 qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
Liad Kaufmande24f632015-08-04 15:19:18 +03001845 }
1846
Johannes Berg013290a2014-08-04 13:38:48 +02001847 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
1848 ieee80211_vif_type_p2p(vif));
1849}
1850
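/*
 * Broadcast station lifecycle as wired up by the helpers in this file
 * (sketch):
 *
 *	iwl_mvm_alloc_bcast_sta(mvm, vif);	- reserve sta_id + queue mask
 *	iwl_mvm_send_add_bcast_sta(mvm, vif);	- push the station to the FW
 *	...
 *	iwl_mvm_send_rm_bcast_sta(mvm, vif);	- remove it from the FW only
 *	iwl_mvm_dealloc_bcast_sta(mvm, vif);	- drop the local entry
 *
 * iwl_mvm_add_bcast_sta() and iwl_mvm_rm_bcast_sta() below combine the
 * respective pairs.
 */
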
Johannes Berg8ca151b2013-01-24 14:25:36 +01001851/* Allocate a new station entry for the broadcast station to the given vif,
1852 * and send it to the FW.
1853 * Note that each P2P mac should have its own broadcast station.
1854 *
1855 * @mvm: the mvm component
1856 * @vif: the interface to which the broadcast station is added */
Johannes Berg013290a2014-08-04 13:38:48 +02001858int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001859{
1860 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001861 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001862 int ret;
1863
1864 lockdep_assert_held(&mvm->mutex);
1865
Johannes Berg013290a2014-08-04 13:38:48 +02001866 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001867 if (ret)
1868 return ret;
1869
Johannes Berg013290a2014-08-04 13:38:48 +02001870 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001871
1872 if (ret)
1873 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02001874
Johannes Berg8ca151b2013-01-24 14:25:36 +01001875 return ret;
1876}
1877
Johannes Berg013290a2014-08-04 13:38:48 +02001878void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1879{
1880 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1881
1882 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
1883}
1884
Johannes Berg8ca151b2013-01-24 14:25:36 +01001885/*
1886 * Send the FW a request to remove the station from its internal data
1887 * structures, and in addition remove it from the local data structure.
1888 */
Johannes Berg013290a2014-08-04 13:38:48 +02001889int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001890{
1891 int ret;
1892
1893 lockdep_assert_held(&mvm->mutex);
1894
Johannes Berg013290a2014-08-04 13:38:48 +02001895 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001896
Johannes Berg013290a2014-08-04 13:38:48 +02001897 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1898
Johannes Berg8ca151b2013-01-24 14:25:36 +01001899 return ret;
1900}
1901
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03001902#define IWL_MAX_RX_BA_SESSIONS 16
1903
Sara Sharonb915c102016-03-23 16:32:02 +02001904static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02001905{
Sara Sharonb915c102016-03-23 16:32:02 +02001906 struct iwl_mvm_delba_notif notif = {
1907 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
1908 .metadata.sync = 1,
1909 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02001910 };
Sara Sharonb915c102016-03-23 16:32:02 +02001911 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
1912}
Sara Sharon10b2b202016-03-20 16:23:41 +02001913
Sara Sharonb915c102016-03-23 16:32:02 +02001914static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
1915 struct iwl_mvm_baid_data *data)
1916{
1917 int i;
1918
1919 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
1920
1921 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1922 int j;
1923 struct iwl_mvm_reorder_buffer *reorder_buf =
1924 &data->reorder_buf[i];
1925
Sara Sharon06904052016-02-28 20:28:17 +02001926 spin_lock_bh(&reorder_buf->lock);
1927 if (likely(!reorder_buf->num_stored)) {
1928 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02001929 continue;
Sara Sharon06904052016-02-28 20:28:17 +02001930 }
Sara Sharonb915c102016-03-23 16:32:02 +02001931
1932 /*
1933 * This shouldn't happen in regular DELBA since the internal
1934 * delBA notification should trigger a release of all frames in
1935 * the reorder buffer.
1936 */
1937 WARN_ON(1);
1938
1939 for (j = 0; j < reorder_buf->buf_size; j++)
1940 __skb_queue_purge(&reorder_buf->entries[j]);
Sara Sharon06904052016-02-28 20:28:17 +02001941 /*
1942		 * Prevent timer re-arm. This prevents a very far-fetched case
1943 * where we timed out on the notification. There may be prior
1944 * RX frames pending in the RX queue before the notification
1945 * that might get processed between now and the actual deletion
1946 * and we would re-arm the timer although we are deleting the
1947 * reorder buffer.
1948 */
1949 reorder_buf->removed = true;
1950 spin_unlock_bh(&reorder_buf->lock);
1951 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02001952 }
1953}
1954
1955static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
1956 u32 sta_id,
1957 struct iwl_mvm_baid_data *data,
1958 u16 ssn, u8 buf_size)
1959{
1960 int i;
1961
1962 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1963 struct iwl_mvm_reorder_buffer *reorder_buf =
1964 &data->reorder_buf[i];
1965 int j;
1966
1967 reorder_buf->num_stored = 0;
1968 reorder_buf->head_sn = ssn;
1969 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02001970 /* rx reorder timer */
1971 reorder_buf->reorder_timer.function =
1972 iwl_mvm_reorder_timer_expired;
1973 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
1974 init_timer(&reorder_buf->reorder_timer);
1975 spin_lock_init(&reorder_buf->lock);
1976 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02001977 reorder_buf->queue = i;
1978 reorder_buf->sta_id = sta_id;
1979 for (j = 0; j < reorder_buf->buf_size; j++)
1980 __skb_queue_head_init(&reorder_buf->entries[j]);
1981 }
Sara Sharon10b2b202016-03-20 16:23:41 +02001982}
1983
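/*
 * Each entries[] slot is an skb queue; the RX reorder path (outside this
 * file) is expected to slot frames by sequence number modulo the buffer
 * size, roughly (sketch, not the actual implementation):
 *
 *	int index = sn % reorder_buf->buf_size;
 *	__skb_queue_tail(&reorder_buf->entries[index], skb);
 */
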
Johannes Berg8ca151b2013-01-24 14:25:36 +01001984int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Sara Sharon10b2b202016-03-20 16:23:41 +02001985 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001986{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001987 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001988 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02001989 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001990 int ret;
1991 u32 status;
1992
1993 lockdep_assert_held(&mvm->mutex);
1994
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03001995 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
1996 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
1997 return -ENOSPC;
1998 }
1999
Sara Sharon10b2b202016-03-20 16:23:41 +02002000 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2001 /*
2002 * Allocate here so if allocation fails we can bail out early
2003 * before starting the BA session in the firmware
2004 */
Sara Sharonb915c102016-03-23 16:32:02 +02002005 baid_data = kzalloc(sizeof(*baid_data) +
2006 mvm->trans->num_rx_queues *
2007 sizeof(baid_data->reorder_buf[0]),
2008 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002009 if (!baid_data)
2010 return -ENOMEM;
2011 }
2012
Johannes Berg8ca151b2013-01-24 14:25:36 +01002013 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2014 cmd.sta_id = mvm_sta->sta_id;
2015 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002016 if (start) {
2017 cmd.add_immediate_ba_tid = (u8) tid;
2018 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Sara Sharon854c5702016-01-26 13:17:47 +02002019 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002020 } else {
2021 cmd.remove_immediate_ba_tid = (u8) tid;
2022 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002023 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2024 STA_MODIFY_REMOVE_BA_TID;
2025
2026 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002027 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2028 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002029 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002030 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002031 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002032
Sara Sharon837c4da2016-01-07 16:50:45 +02002033 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002034 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002035 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2036 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002037 break;
2038 case ADD_STA_IMMEDIATE_BA_FAILURE:
2039 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2040 ret = -ENOSPC;
2041 break;
2042 default:
2043 ret = -EIO;
2044 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2045 start ? "start" : "stopp", status);
2046 break;
2047 }
2048
Sara Sharon10b2b202016-03-20 16:23:41 +02002049 if (ret)
2050 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002051
Sara Sharon10b2b202016-03-20 16:23:41 +02002052 if (start) {
2053 u8 baid;
2054
2055 mvm->rx_ba_sessions++;
2056
2057 if (!iwl_mvm_has_new_rx_api(mvm))
2058 return 0;
2059
2060 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2061 ret = -EINVAL;
2062 goto out_free;
2063 }
2064 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2065 IWL_ADD_STA_BAID_SHIFT);
2066 baid_data->baid = baid;
2067 baid_data->timeout = timeout;
2068 baid_data->last_rx = jiffies;
Wei Yongjun72c240f2016-07-12 11:40:57 +00002069 setup_timer(&baid_data->session_timer,
2070 iwl_mvm_rx_agg_session_expired,
2071 (unsigned long)&mvm->baid_map[baid]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002072 baid_data->mvm = mvm;
2073 baid_data->tid = tid;
2074 baid_data->sta_id = mvm_sta->sta_id;
2075
2076 mvm_sta->tid_to_baid[tid] = baid;
2077 if (timeout)
2078 mod_timer(&baid_data->session_timer,
2079 TU_TO_EXP_TIME(timeout * 2));
2080
Sara Sharonb915c102016-03-23 16:32:02 +02002081 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2082 baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002083 /*
2084		 * Protect the BA data with RCU to cover a case where our
2085		 * internal RX sync mechanism times out (not that it's
2086		 * supposed to happen) and we would free the session data
2087		 * while RX is still being processed in parallel.
2088 */
Sara Sharon35263a02016-06-21 12:12:10 +03002089 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2090 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002091 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2092 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002093 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002094 u8 baid = mvm_sta->tid_to_baid[tid];
2095
Sara Sharon60dec522016-06-21 14:14:08 +03002096 if (mvm->rx_ba_sessions > 0)
2097 /* check that restart flow didn't zero the counter */
2098 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002099 if (!iwl_mvm_has_new_rx_api(mvm))
2100 return 0;
2101
2102 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2103 return -EINVAL;
2104
2105 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2106 if (WARN_ON(!baid_data))
2107 return -EINVAL;
2108
2109 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002110 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002111 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002112 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2113 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002114 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002115 }
2116 return 0;
2117
2118out_free:
2119 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002120 return ret;
2121}
2122
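/*
 * Sketch of the expected dispatch from the driver's ampdu_action handler
 * (the real switch lives in this driver's mac80211 glue, not here):
 *
 *	case IEEE80211_AMPDU_RX_START:
 *		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true,
 *					 buf_size, timeout);
 *		break;
 *	case IEEE80211_AMPDU_RX_STOP:
 *		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false,
 *					 buf_size, timeout);
 *		break;
 */
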
Liad Kaufman9794c642015-08-19 17:34:28 +03002123int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2124 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002125{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002126 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002127 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002128 int ret;
2129 u32 status;
2130
2131 lockdep_assert_held(&mvm->mutex);
2132
2133 if (start) {
2134 mvm_sta->tfd_queue_msk |= BIT(queue);
2135 mvm_sta->tid_disable_agg &= ~BIT(tid);
2136 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002137 /* In DQA-mode the queue isn't removed on agg termination */
2138 if (!iwl_mvm_is_dqa_supported(mvm))
2139 mvm_sta->tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002140 mvm_sta->tid_disable_agg |= BIT(tid);
2141 }
2142
2143 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2144 cmd.sta_id = mvm_sta->sta_id;
2145 cmd.add_modify = STA_MODE_MODIFY;
2146 cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
2147 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2148 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2149
2150 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002151 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2152 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002153 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002154 if (ret)
2155 return ret;
2156
Sara Sharon837c4da2016-01-07 16:50:45 +02002157 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002158 case ADD_STA_SUCCESS:
2159 break;
2160 default:
2161 ret = -EIO;
2162 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2163 start ? "start" : "stopp", status);
2164 break;
2165 }
2166
2167 return ret;
2168}
2169
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002170const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002171 IEEE80211_AC_BE,
2172 IEEE80211_AC_BK,
2173 IEEE80211_AC_BK,
2174 IEEE80211_AC_BE,
2175 IEEE80211_AC_VI,
2176 IEEE80211_AC_VI,
2177 IEEE80211_AC_VO,
2178 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002179 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002180};
2181
Johannes Berg3e56ead2013-02-15 22:23:18 +01002182static const u8 tid_to_ucode_ac[] = {
2183 AC_BE,
2184 AC_BK,
2185 AC_BK,
2186 AC_BE,
2187 AC_VI,
2188 AC_VI,
2189 AC_VO,
2190 AC_VO,
2191};
2192
Johannes Berg8ca151b2013-01-24 14:25:36 +01002193int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2194 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2195{
Johannes Berg5b577a92013-11-14 18:20:04 +01002196 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002197 struct iwl_mvm_tid_data *tid_data;
2198 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002199 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002200
2201 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2202 return -EINVAL;
2203
2204 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2205 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
2206 mvmsta->tid_data[tid].state);
2207 return -ENXIO;
2208 }
2209
2210 lockdep_assert_held(&mvm->mutex);
2211
Arik Nemtsovb2492502014-03-13 12:21:50 +02002212 spin_lock_bh(&mvmsta->lock);
2213
2214 /* possible race condition - we entered D0i3 while starting agg */
2215 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2216 spin_unlock_bh(&mvmsta->lock);
2217 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2218 return -EIO;
2219 }
2220
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002221 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002222
Liad Kaufmancf961e12015-08-13 19:16:08 +03002223 /*
2224 * Note the possible cases:
2225 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2226 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2227 * one and mark it as reserved
2228 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
2229 * non-DQA mode, since the TXQ hasn't yet been allocated
2230 */
2231 txq_id = mvmsta->tid_data[tid].txq_id;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002232 if (iwl_mvm_is_dqa_supported(mvm) &&
2233 unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
2234 ret = -ENXIO;
2235 IWL_DEBUG_TX_QUEUES(mvm,
2236 "Can't start tid %d agg on shared queue!\n",
2237 tid);
2238 goto release_locks;
2239 } else if (!iwl_mvm_is_dqa_supported(mvm) ||
Liad Kaufmancf961e12015-08-13 19:16:08 +03002240 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002241 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2242 mvm->first_agg_queue,
Liad Kaufmancf961e12015-08-13 19:16:08 +03002243 mvm->last_agg_queue);
2244 if (txq_id < 0) {
2245 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002246 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2247 goto release_locks;
2248 }
2249
2250 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2251 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002252 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002253
2254 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002255
Liad Kaufmancf961e12015-08-13 19:16:08 +03002256 IWL_DEBUG_TX_QUEUES(mvm,
2257 "AGG for tid %d will be on queue #%d\n",
2258 tid, txq_id);
2259
Johannes Berg8ca151b2013-01-24 14:25:36 +01002260 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002261 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002262 tid_data->txq_id = txq_id;
2263 *ssn = tid_data->ssn;
2264
2265 IWL_DEBUG_TX_QUEUES(mvm,
2266 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2267 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2268 tid_data->next_reclaimed);
2269
2270 if (tid_data->ssn == tid_data->next_reclaimed) {
2271 tid_data->state = IWL_AGG_STARTING;
2272 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2273 } else {
2274 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2275 }
2276
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002277 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002278 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002279
2280release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002281 spin_unlock(&mvm->queue_info_lock);
2282out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002283 spin_unlock_bh(&mvmsta->lock);
2284
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002285 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002286}
2287
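/*
 * TX aggregation handshake driven from mac80211's ampdu_action (sketch of
 * the expected ordering):
 *
 *	iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, &ssn);
 *	// mac80211 completes the addBA exchange with the peer, then:
 *	iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size, amsdu);
 *	...
 *	iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
 *	// or iwl_mvm_sta_tx_agg_flush() on teardown
 */
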
2288int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002289 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2290 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002291{
Johannes Berg5b577a92013-11-14 18:20:04 +01002292 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002293 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002294 unsigned int wdg_timeout =
2295 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002296 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002297 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002298 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002299 u16 ssn;
2300
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002301 struct iwl_trans_txq_scd_cfg cfg = {
2302 .sta_id = mvmsta->sta_id,
2303 .tid = tid,
2304 .frame_limit = buf_size,
2305 .aggregate = true,
2306 };
2307
Eyal Shapiraefed6642014-09-14 15:58:53 +03002308 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2309 != IWL_MAX_TID_COUNT);
2310
Johannes Berg8ca151b2013-01-24 14:25:36 +01002311 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2312
2313 spin_lock_bh(&mvmsta->lock);
2314 ssn = tid_data->ssn;
2315 queue = tid_data->txq_id;
2316 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002317 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002318 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002319 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002320 spin_unlock_bh(&mvmsta->lock);
2321
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002322 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01002323
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002324 spin_lock_bh(&mvm->queue_info_lock);
2325 queue_status = mvm->queue_info[queue].status;
2326 spin_unlock_bh(&mvm->queue_info_lock);
2327
Liad Kaufmancf961e12015-08-13 19:16:08 +03002328 /* In DQA mode, the existing queue might need to be reconfigured */
2329 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002330 /* Maybe there is no need to even alloc a queue... */
2331 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2332 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002333
2334 /*
2335 * Only reconfig the SCD for the queue if the window size has
2336		 * changed from the current one (become smaller)
2337 */
2338 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2339 /*
2340 * If reconfiguring an existing queue, it first must be
2341 * drained
2342 */
2343 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
2344 BIT(queue));
2345 if (ret) {
2346 IWL_ERR(mvm,
2347 "Error draining queue before reconfig\n");
2348 return ret;
2349 }
2350
2351 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2352 mvmsta->sta_id, tid,
2353 buf_size, ssn);
2354 if (ret) {
2355 IWL_ERR(mvm,
2356 "Error reconfiguring TXQ #%d\n", queue);
2357 return ret;
2358 }
2359 }
2360 }
2361
2362 if (alloc_queue)
2363 iwl_mvm_enable_txq(mvm, queue,
2364 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2365 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03002366
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002367 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2368 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2369 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2370 if (ret)
2371 return -EIO;
2372 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002373
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002374 /* No need to mark as reserved */
2375 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002376 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002377 spin_unlock_bh(&mvm->queue_info_lock);
2378
Johannes Berg8ca151b2013-01-24 14:25:36 +01002379 /*
2380 * Even though in theory the peer could have different
2381 * aggregation reorder buffer sizes for different sessions,
2382 * our ucode doesn't allow for that and has a global limit
2383 * for each station. Therefore, use the minimum of all the
2384 * aggregation sessions and our default value.
2385 */
2386 mvmsta->max_agg_bufsize =
2387 min(mvmsta->max_agg_bufsize, buf_size);
2388 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2389
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03002390 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2391 sta->addr, tid);
2392
Eyal Shapira9e680942013-11-09 00:16:16 +02002393 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002394}
2395
2396int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2397 struct ieee80211_sta *sta, u16 tid)
2398{
Johannes Berg5b577a92013-11-14 18:20:04 +01002399 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002400 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2401 u16 txq_id;
2402 int err;
2403
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02002404 /*
2405 * If mac80211 is cleaning its state, then say that we finished since
2406 * our state has been cleared anyway.
2407 */
2408 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2409 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2410 return 0;
2411 }
2412
Johannes Berg8ca151b2013-01-24 14:25:36 +01002413 spin_lock_bh(&mvmsta->lock);
2414
2415 txq_id = tid_data->txq_id;
2416
2417 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2418 mvmsta->sta_id, tid, txq_id, tid_data->state);
2419
Eyal Shapiraefed6642014-09-14 15:58:53 +03002420 mvmsta->agg_tids &= ~BIT(tid);
2421
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002422 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002423 /*
2424	 * The TXQ is marked as reserved only if no traffic came through yet.
2425	 * This means no traffic has been sent on this TID (agg'd or not), so
2426	 * we no longer have use for the queue. It hasn't even been
2427	 * allocated through iwl_mvm_enable_txq, so we can just mark it back
2428	 * as free.
2429 */
2430 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2431 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002432
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002433 spin_unlock_bh(&mvm->queue_info_lock);
2434
Johannes Berg8ca151b2013-01-24 14:25:36 +01002435 switch (tid_data->state) {
2436 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01002437 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002438
2439 IWL_DEBUG_TX_QUEUES(mvm,
2440 "ssn = %d, next_recl = %d\n",
2441 tid_data->ssn, tid_data->next_reclaimed);
2442
2443 /* There are still packets for this RA / TID in the HW */
2444 if (tid_data->ssn != tid_data->next_reclaimed) {
2445 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
2446 err = 0;
2447 break;
2448 }
2449
2450 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002451 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002452 spin_unlock_bh(&mvmsta->lock);
2453
2454 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2455
2456 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2457
Liad Kaufmancf961e12015-08-13 19:16:08 +03002458 if (!iwl_mvm_is_dqa_supported(mvm)) {
2459 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2460
2461 iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
2462 }
Johannes Bergf7f89e72014-08-05 15:24:44 +02002463 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002464 case IWL_AGG_STARTING:
2465 case IWL_EMPTYING_HW_QUEUE_ADDBA:
2466 /*
2467 * The agg session has been stopped before it was set up. This
2468 * can happen when the AddBA timer times out for example.
2469 */
2470
2471 /* No barriers since we are under mutex */
2472 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002473
2474 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2475 tid_data->state = IWL_AGG_OFF;
2476 err = 0;
2477 break;
2478 default:
2479 IWL_ERR(mvm,
2480 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2481 mvmsta->sta_id, tid, tid_data->state);
2482 IWL_ERR(mvm,
2483 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2484 err = -EINVAL;
2485 }
2486
2487 spin_unlock_bh(&mvmsta->lock);
2488
2489 return err;
2490}
2491
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002492int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2493 struct ieee80211_sta *sta, u16 tid)
2494{
Johannes Berg5b577a92013-11-14 18:20:04 +01002495 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002496 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2497 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02002498 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002499
2500 /*
2501 * First set the agg state to OFF to avoid calling
2502 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2503 */
2504 spin_lock_bh(&mvmsta->lock);
2505 txq_id = tid_data->txq_id;
2506 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2507 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02002508 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002509 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002510 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002511 spin_unlock_bh(&mvmsta->lock);
2512
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002513 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002514 /*
2515	 * The TXQ is marked as reserved only if no traffic came through yet.
2516	 * This means no traffic has been sent on this TID (agg'd or not), so
2517	 * we no longer have use for the queue. It hasn't even been
2518	 * allocated through iwl_mvm_enable_txq, so we can just mark it back
2519	 * as free.
2520 */
2521 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2522 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002523 spin_unlock_bh(&mvm->queue_info_lock);
2524
Johannes Bergb6658ff2013-07-24 13:55:51 +02002525 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002526 iwl_mvm_drain_sta(mvm, mvmsta, true);
Luca Coelho5888a402015-10-06 09:54:57 +03002527 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
Johannes Bergb6658ff2013-07-24 13:55:51 +02002528 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002529 iwl_trans_wait_tx_queue_empty(mvm->trans,
2530 mvmsta->tfd_queue_msk);
2531 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002532
Johannes Bergf7f89e72014-08-05 15:24:44 +02002533 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2534
Liad Kaufmancf961e12015-08-13 19:16:08 +03002535 if (!iwl_mvm_is_dqa_supported(mvm)) {
2536 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2537
2538 iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
2539 tid, 0);
2540 }
Johannes Bergb6658ff2013-07-24 13:55:51 +02002541 }
2542
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002543 return 0;
2544}
2545
Johannes Berg8ca151b2013-01-24 14:25:36 +01002546static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2547{
Johannes Berg2dc2a152015-06-16 17:09:18 +02002548 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002549
2550 lockdep_assert_held(&mvm->mutex);
2551
Johannes Berg2dc2a152015-06-16 17:09:18 +02002552 /* Pick the unused key offset with the highest 'deleted'
2553 * counter. Every time a key is deleted, all the counters
2554 * are incremented and the one that was just deleted is
2555 * reset to zero. Thus, the highest counter is the one
2556 * that was deleted longest ago. Pick that one.
2557 */
2558 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2559 if (test_bit(i, mvm->fw_key_table))
2560 continue;
2561 if (mvm->fw_key_deleted[i] > max) {
2562 max = mvm->fw_key_deleted[i];
2563 max_offs = i;
2564 }
2565 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002566
Johannes Berg2dc2a152015-06-16 17:09:18 +02002567 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002568 return STA_KEY_IDX_INVALID;
2569
Johannes Berg2dc2a152015-06-16 17:09:18 +02002570 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002571}
2572
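/*
 * The matching aging happens on the delete side (sketch; the actual
 * update is done where keys are removed, later in this file): zero the
 * freed slot and bump every other counter, so the oldest deletion wins
 * the next allocation above:
 *
 *	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
 *		if (mvm->fw_key_deleted[i] < U8_MAX)
 *			mvm->fw_key_deleted[i]++;
 *	}
 *	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
 */
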
Johannes Berg5f7a1842015-12-11 09:36:10 +01002573static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2574 struct ieee80211_vif *vif,
2575 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002576{
Johannes Berg5b530e92014-12-23 16:00:17 +01002577 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002578
Johannes Berg5f7a1842015-12-11 09:36:10 +01002579 if (sta)
2580 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002581
2582 /*
2583 * The device expects GTKs for station interfaces to be
2584 * installed as GTKs for the AP station. If we have no
2585 * station ID, then use AP's station ID.
2586 */
2587 if (vif->type == NL80211_IFTYPE_STATION &&
Avri Altman9513c5e2015-10-19 16:29:11 +02002588 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2589 u8 sta_id = mvmvif->ap_sta_id;
2590
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002591 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2592 lockdep_is_held(&mvm->mutex));
2593
Avri Altman9513c5e2015-10-19 16:29:11 +02002594 /*
2595 * It is possible that the 'sta' parameter is NULL,
2596 * for example when a GTK is removed - the sta_id will then
2597 * be the AP ID, and no station was passed by mac80211.
2598 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002599 if (IS_ERR_OR_NULL(sta))
2600 return NULL;
2601
2602 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02002603 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002604
Johannes Berg5f7a1842015-12-11 09:36:10 +01002605 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002606}
2607
2608static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
2609 struct iwl_mvm_sta *mvm_sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01002610 struct ieee80211_key_conf *keyconf, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002611 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
2612 u8 key_offset)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002613{
Max Stepanov5a258aa2013-04-07 09:11:21 +03002614 struct iwl_mvm_add_sta_key_cmd cmd = {};
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002615 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01002616 int ret;
2617 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002618 u16 keyidx;
2619 int i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002620 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002621
2622 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2623 STA_KEY_FLG_KEYID_MSK;
2624 key_flags = cpu_to_le16(keyidx);
2625 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
2626
2627 switch (keyconf->cipher) {
2628 case WLAN_CIPHER_SUITE_TKIP:
2629 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Max Stepanov5a258aa2013-04-07 09:11:21 +03002630 cmd.tkip_rx_tsc_byte2 = tkip_iv32;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002631 for (i = 0; i < 5; i++)
Max Stepanov5a258aa2013-04-07 09:11:21 +03002632 cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
2633 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002634 break;
2635 case WLAN_CIPHER_SUITE_CCMP:
2636 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Max Stepanov5a258aa2013-04-07 09:11:21 +03002637 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002638 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01002639 case WLAN_CIPHER_SUITE_WEP104:
2640 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05002641 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01002642 case WLAN_CIPHER_SUITE_WEP40:
2643 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
2644 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
2645 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03002646 case WLAN_CIPHER_SUITE_GCMP_256:
2647 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
2648 /* fall through */
2649 case WLAN_CIPHER_SUITE_GCMP:
2650 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
2651 memcpy(cmd.key, keyconf->key, keyconf->keylen);
2652 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002653 default:
Max Stepanove36e5432013-08-27 19:56:13 +03002654 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
2655 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002656 }
2657
Johannes Bergba3943b2014-11-12 23:54:48 +01002658 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002659 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2660
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002661 cmd.key_offset = key_offset;
Max Stepanov5a258aa2013-04-07 09:11:21 +03002662 cmd.key_flags = key_flags;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002663 cmd.sta_id = sta_id;
2664
2665 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002666 if (cmd_flags & CMD_ASYNC)
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002667 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
2668 sizeof(cmd), &cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002669 else
2670 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
2671 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002672
2673 switch (status) {
2674 case ADD_STA_SUCCESS:
2675 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
2676 break;
2677 default:
2678 ret = -EIO;
2679 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
2680 break;
2681 }
2682
2683 return ret;
2684}
2685
2686static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
2687 struct ieee80211_key_conf *keyconf,
2688 u8 sta_id, bool remove_key)
2689{
2690 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
2691
2692 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03002693 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
2694 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
2695 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
2696 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
2697 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
2698 return -EINVAL;
2699
2700 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
2701 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002702 return -EINVAL;
2703
2704 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
2705 igtk_cmd.sta_id = cpu_to_le32(sta_id);
2706
2707 if (remove_key) {
2708 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
2709 } else {
2710 struct ieee80211_key_seq seq;
2711 const u8 *pn;
2712
Ayala Bekeraa950522016-06-01 00:28:09 +03002713 switch (keyconf->cipher) {
2714 case WLAN_CIPHER_SUITE_AES_CMAC:
2715 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
2716 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03002717 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2718 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2719 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
2720 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03002721 default:
2722 return -EINVAL;
2723 }
2724
Ayala Beker8e160ab2016-04-11 11:37:38 +03002725 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
2726 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
2727 igtk_cmd.ctrl_flags |=
2728 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       le32_to_cpu(igtk_cmd.sta_id));

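	/*
	 * Firmware without the new RX API only understands the smaller
	 * v1 layout of this command, so repack the fields accordingly.
	 */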
	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

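	/*
	 * TKIP needs the RX phase-1 key, which depends on the transmitter
	 * address and iv32, so derive it here before uploading the key;
	 * the other ciphers are handed to the firmware as-is (unknown
	 * ones get STA_KEY_FLG_EXT set in iwl_mvm_send_sta_key()).
	 */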
	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

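	/*
	 * Key removal is expressed as an ADD_STA_KEY command with the
	 * STA_KEY_NOT_VALID flag set for the offset being freed.
	 */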
	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, in which case
	 * the station must be retrieved from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
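
/*
 * A minimal sketch of the call pattern implied by the key_offset logic
 * above (illustration only, not a caller that exists in this file):
 * the normal set_key path passes STA_KEY_IDX_INVALID so a free slot is
 * allocated, while the HW_RESTART/D3 paths pass a fixed offset in order
 * to reuse the previous slots, e.g.:
 *
 *	ret = iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
 *				  STA_KEY_IDX_INVALID);
 */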

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
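	/*
	 * The counters above age every other slot while zeroing this one,
	 * so the offset allocator can prefer the slot that has been free
	 * the longest (presumably so a just-freed offset is not handed
	 * out again immediately).
	 */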

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

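	/*
	 * This path can be hit from packet processing, outside mvm->mutex,
	 * hence the RCU-protected lookup and the CMD_ASYNC upload below.
	 */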
	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check
	 * whether all the queues we're releasing frames from, combined, have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
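		/*
		 * Release only as many frames as are actually queued;
		 * 'remaining' is whatever part of the requested count the
		 * aggregation queues could not cover.
		 */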
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

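	/* the station index comes from the firmware; range-check it */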
	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

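		/* skip stations that belong to a different interface */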
		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

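	/*
	 * Block Tx to the AP's station entry while it is absent for the
	 * channel switch (the post-channel-switch flow is expected to
	 * re-enable it).
	 */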
	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}