blob: 84384a4321d975508cd96ae9b47c3362d4eace80 [file] [log] [blame]
Johannes Berg8ca151b2013-01-24 14:25:36 +01001/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03008 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
Sara Sharon854c5702016-01-26 13:17:47 +020010 * Copyright(c) 2016 Intel Deutschland GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010011 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
Emmanuel Grumbach410dc5a2013-02-18 09:22:28 +020027 * in the file called COPYING.
Johannes Berg8ca151b2013-01-24 14:25:36 +010028 *
29 * Contact Information:
Emmanuel Grumbachcb2f8272015-11-17 15:39:56 +020030 * Intel Linux Wireless <linuxwifi@intel.com>
Johannes Berg8ca151b2013-01-24 14:25:36 +010031 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +030035 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
Sara Sharon854c5702016-01-26 13:17:47 +020037 * Copyright(c) 2016 Intel Deutschland GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010038 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67#include <net/mac80211.h>
68
69#include "mvm.h"
70#include "sta.h"
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +030071#include "rs.h"
Johannes Berg8ca151b2013-01-24 14:25:36 +010072
Sara Sharon854c5702016-01-26 13:17:47 +020073/*
74 * New version of ADD_STA_sta command added new fields at the end of the
75 * structure, so sending the size of the relevant API's structure is enough to
76 * support both API versions.
77 */
78static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
79{
80 return iwl_mvm_has_new_rx_api(mvm) ?
81 sizeof(struct iwl_mvm_add_sta_cmd) :
82 sizeof(struct iwl_mvm_add_sta_cmd_v7);
83}
84
/*
 * Find a free station ID in mvm->fw_id_to_mac_id.
 *
 * @iftype: the interface type the station is being added for; for a
 *	    non-station vif, sta_id 0 is kept reserved (see below).
 *
 * Returns a free sta_id, or IWL_MVM_STATION_COUNT if the table is full.
 * Must be called with mvm->mutex held; the table is only modified under
 * that mutex, so no RCU read lock is needed here.
 */
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	/* reserved_ids is a 32-bit mask, so the table must fit in 32 IDs */
	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	/* during HW restart the old sta_id is reused, not re-allocated */
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	/* no free slot found; callers treat this value as "no space" */
	return IWL_MVM_STATION_COUNT;
}
111
/*
 * Send station add/update command to firmware.
 *
 * @update: false to add a new station, true to modify an existing one.
 * @flags:  STA_MODIFY_* bits; STA_MODIFY_QUEUES forces the TFD queue
 *	    mask and address to be (re)sent even on an update.
 *
 * Builds an ADD_STA command from the mac80211 station state (bandwidth,
 * NSS, SMPS mode, aggregation parameters) and sends it synchronously.
 * Returns 0 on success, a negative errno on transport failure, or -EIO
 * if the firmware rejected the command.
 */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	/* queue mask and MAC address only needed on add or queue change */
	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	/*
	 * Deliberate fall-through: each supported bandwidth implies all
	 * the narrower ones, so the flags accumulate top-down.
	 */
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	/* map the RX spatial-stream count onto the FW MIMO flags */
	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		/* mac80211 should never hand us these pseudo-modes */
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	/* VHT A-MPDU exponent takes precedence over the HT ampdu factor */
	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	/* transport succeeded; now check the firmware's own status word */
	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
225
Sara Sharon10b2b202016-03-20 16:23:41 +0200226static void iwl_mvm_rx_agg_session_expired(unsigned long data)
227{
228 struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
229 struct iwl_mvm_baid_data *ba_data;
230 struct ieee80211_sta *sta;
231 struct iwl_mvm_sta *mvm_sta;
232 unsigned long timeout;
233
234 rcu_read_lock();
235
236 ba_data = rcu_dereference(*rcu_ptr);
237
238 if (WARN_ON(!ba_data))
239 goto unlock;
240
241 if (!ba_data->timeout)
242 goto unlock;
243
244 timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
245 if (time_is_after_jiffies(timeout)) {
246 mod_timer(&ba_data->session_timer, timeout);
247 goto unlock;
248 }
249
250 /* Timer expired */
251 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
252 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
253 ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
254 sta->addr, ba_data->tid);
255unlock:
256 rcu_read_unlock();
257}
258
/*
 * Allocate and enable dedicated HW TX queues for a TDLS station.
 *
 * One queue per AC is carved out of the non-aggregation queue range
 * (below mvm->first_agg_queue). Returns 0 on success or -EBUSY if not
 * enough free queues exist. Must be called with mvm->mutex held.
 */
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		/* claim it locally so the next AC picks a different queue */
		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}
297
298static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
299 struct ieee80211_sta *sta)
300{
301 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
302 unsigned long sta_msk;
303 int i;
304
305 lockdep_assert_held(&mvm->mutex);
306
307 /* disable the TDLS STA-specific queues */
308 sta_msk = mvmsta->tfd_queue_msk;
Emmanuel Grumbacha4ca3ed2015-01-20 17:07:10 +0200309 for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
Arik Nemtsov06ecdba2015-10-12 14:47:11 +0300310 iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +0300311}
312
/*
 * Disable aggregations for a bitmap of TIDs for a given station, and
 * optionally tell the firmware to remove the queue from the station.
 *
 * @queue:		the HW queue whose owning station is targeted.
 * @disable_agg_tids:	TIDs on which TX aggregation must be disabled.
 * @remove_queue:	also request STA_MODIFY_QUEUE_REMOVAL from the FW.
 *
 * Returns the result of the synchronous ADD_STA command, or -EINVAL if
 * the station the queue maps to no longer exists.
 */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	/* snapshot the command fields while still inside the RCU section */
	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
363
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 *
 * Returns a bitmap of the TIDs that were in IWL_AGG_ON state on this
 * queue (so the caller can disable aggregation for them), or 0 if the
 * owning station no longer exists. Must be called with mvm->mutex held.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	/* detach every TID that was mapped to this queue from it */
	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
	}
	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */

	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * NOTE(review): plain spin_lock() here while every other access to
	 * queue_info_lock in this file uses spin_lock_bh() - confirm this
	 * path can never race with BH-context users of the same lock.
	 */
	spin_lock(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock(&mvm->queue_info_lock);

	return disable_agg_tids;
}
418
/*
 * Allocate and configure a DQA TX queue for (station, TID).
 *
 * Queue selection order:
 *   1. a MGMT-range queue for non-QoS / QoS-NDP / management frames;
 *   2. the station's reserved queue, if still reserved or inactive;
 *   3. any free DATA-range queue.
 * If the chosen queue is an inactive leftover it is first torn down in
 * the firmware before being re-enabled for this station.
 *
 * @hdr: header of the frame that triggered the allocation; supplies the
 *	 frame type (for queue-range choice) and the starting SSN.
 *
 * Returns 0 on success, -ENOSPC if no queue is available, or an errno
 * from the firmware commands. Must be called with mvm->mutex held.
 */
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	int ssn;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	/* fall back to the queue reserved for this station at add time */
	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue >= 0)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: support shared queues for same RA */
	if (queue < 0)
		return -ENOSPC;

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.enable = 0,
		};

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		/* Disable the queue */
		iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
					     true);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
			    queue, mvmsta->sta_id, tid);

	/* start the queue at the sequence number of the triggering frame */
	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	/* sample the agg state under the lock; acted upon after the FW cmd */
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
	if (ret)
		goto out_err;

	/* If we need to re-enable aggregations... */
	if (queue_state == IWL_AGG_ON)
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);

	return ret;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}
578
579static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
580{
581 if (tid == IWL_MAX_TID_COUNT)
582 return IEEE80211_AC_VO; /* MGMT */
583
584 return tid_to_mac80211_ac[tid];
585}
586
/*
 * Flush the deferred TX frames of one (station, TID) pair.
 *
 * Allocates a TX queue for the TID if it doesn't have one yet, then
 * transmits (or frees, on allocation failure) every frame that was
 * queued while no queue was available, and finally wakes the mac80211
 * queue. Must be called with mvm->mutex held.
 */
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	/* the first deferred frame decides queue type and SSN */
	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	/* take ownership of all deferred frames in one splice */
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
638
/*
 * Worker that services stations which accumulated deferred TX traffic
 * while they had no allocated DQA queue: for each pending (station, TID)
 * it allocates a queue and flushes the deferred frames.
 */
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		/* station may have been removed since the bit was set */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}
672
/*
 * Reserve a DQA data queue for a station being added, so a queue is
 * guaranteed to exist once traffic actually starts.
 *
 * A BSS-client station on a managed vif gets the dedicated
 * IWL_MVM_DQA_BSS_CLIENT_QUEUE if it's free; everyone else takes any
 * free DATA-range queue. Returns 0 on success, -ENOSPC otherwise.
 */
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	/* mark it reserved while still holding the lock, so no one races us */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
714
/*
 * Add a station to the driver and the firmware.
 *
 * Picks a free sta_id (or reuses the old one during HW restart),
 * initializes the per-station state, allocates/reserves TX queues
 * (TDLS, legacy or DQA flavors), sends the ADD_STA command and finally
 * publishes the station in fw_id_to_mac_id.
 *
 * Returns 0 on success, -ENOSPC if no sta_id/queue is free, -ENOMEM on
 * allocation failure, or an errno from the firmware command.
 * Must be called with mvm->mutex held.
 */
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	/* IWL_MVM_STATION_COUNT is the "table full" sentinel */
	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* allocate new queues for a TDLS station */
	if (sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		/* legacy (non-DQA): the station shares the vif's AC queues */
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	/* per-RX-queue duplicate-detection data (new RX API only) */
	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			/* only one AP station may exist on a managed vif */
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			/* a TDLS peer requires the AP station to exist */
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	/* publish the station; RX paths may now look it up via RCU */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	/*
	 * NOTE(review): this error path runs the TDLS queue teardown even
	 * for non-TDLS stations (whose tfd_queue_msk may reference queues
	 * shared with the vif) - confirm this is intended for every path
	 * that jumps to err.
	 */
	iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}
818
/*
 * Re-send an existing station's configuration to the firmware.
 * Passes update == true to iwl_mvm_sta_send_to_fw() so the FW modifies
 * the existing entry rather than adding a new one.
 */
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}
825
Johannes Berg8ca151b2013-01-24 14:25:36 +0100826int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
827 bool drain)
828{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +0300829 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +0100830 int ret;
831 u32 status;
832
833 lockdep_assert_held(&mvm->mutex);
834
835 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
836 cmd.sta_id = mvmsta->sta_id;
837 cmd.add_modify = STA_MODE_MODIFY;
838 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
839 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
840
841 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +0200842 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
843 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +0300844 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100845 if (ret)
846 return ret;
847
Sara Sharon837c4da2016-01-07 16:50:45 +0200848 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +0100849 case ADD_STA_SUCCESS:
850 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
851 mvmsta->sta_id);
852 break;
853 default:
854 ret = -EIO;
855 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
856 mvmsta->sta_id);
857 break;
858 }
859
860 return ret;
861}
862
863/*
864 * Remove a station from the FW table. Before sending the command to remove
865 * the station validate that the station is indeed known to the driver (sanity
866 * only).
867 */
868static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
869{
870 struct ieee80211_sta *sta;
871 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
872 .sta_id = sta_id,
873 };
874 int ret;
875
876 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
877 lockdep_is_held(&mvm->mutex));
878
879 /* Note: internal stations are marked as error values */
880 if (!sta) {
881 IWL_ERR(mvm, "Invalid station id\n");
882 return -EINVAL;
883 }
884
Emmanuel Grumbacha1022922014-05-12 11:36:41 +0300885 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +0100886 sizeof(rm_sta_cmd), &rm_sta_cmd);
887 if (ret) {
888 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
889 return ret;
890 }
891
892 return 0;
893}
894
/*
 * Deferred work: finish removing stations whose pending TX frames have
 * now been drained by the firmware. Such stations are marked with the
 * -EBUSY error pointer in fw_id_to_mac_id and flagged in sta_drained.
 */
void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * that later.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		/* -EINVAL marks internal stations, which shouldn't be here */
		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		/* TDLS flow parked queues in tfd_drained; disable them now */
		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}
967
Liad Kaufman24afba72015-07-28 18:56:08 +0300968static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
969 struct ieee80211_vif *vif,
970 struct iwl_mvm_sta *mvm_sta)
971{
972 int ac;
973 int i;
974
975 lockdep_assert_held(&mvm->mutex);
976
977 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
978 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
979 continue;
980
981 ac = iwl_mvm_tid_to_ac_queue(i);
982 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
983 vif->hw_queue[ac], i, 0);
984 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
985 }
986}
987
Johannes Berg8ca151b2013-01-24 14:25:36 +0100988int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
989 struct ieee80211_vif *vif,
990 struct ieee80211_sta *sta)
991{
992 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +0100993 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100994 int ret;
995
996 lockdep_assert_held(&mvm->mutex);
997
Sara Sharona571f5f2015-12-07 12:50:58 +0200998 if (iwl_mvm_has_new_rx_api(mvm))
999 kfree(mvm_sta->dup_data);
1000
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001001 if ((vif->type == NL80211_IFTYPE_STATION &&
1002 mvmvif->ap_sta_id == mvm_sta->sta_id) ||
1003 iwl_mvm_is_dqa_supported(mvm)){
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001004 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1005 if (ret)
1006 return ret;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001007 /* flush its queues here since we are freeing mvm_sta */
Luca Coelho5888a402015-10-06 09:54:57 +03001008 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001009 if (ret)
1010 return ret;
1011 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1012 mvm_sta->tfd_queue_msk);
1013 if (ret)
1014 return ret;
1015 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001016
Liad Kaufman24afba72015-07-28 18:56:08 +03001017 /* If DQA is supported - the queues can be disabled now */
1018 if (iwl_mvm_is_dqa_supported(mvm))
1019 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1020
Johannes Berg8ca151b2013-01-24 14:25:36 +01001021 /* if we are associated - we can't remove the AP STA now */
1022 if (vif->bss_conf.assoc)
1023 return ret;
1024
1025 /* unassoc - go ahead - remove the AP STA now */
1026 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
Eliad Peller37577fe2013-12-05 17:19:39 +02001027
1028 /* clear d0i3_ap_sta_id if no longer relevant */
1029 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
1030 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001031 }
1032
1033 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001034 * This shouldn't happen - the TDLS channel switch should be canceled
1035 * before the STA is removed.
1036 */
1037 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
1038 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1039 cancel_delayed_work(&mvm->tdls_cs.dwork);
1040 }
1041
1042 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001043 * Make sure that the tx response code sees the station as -EBUSY and
1044 * calls the drain worker.
1045 */
1046 spin_lock_bh(&mvm_sta->lock);
1047 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001048 * There are frames pending on the AC queues for this station.
1049 * We need to wait until all the frames are drained...
1050 */
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001051 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001052 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
1053 ERR_PTR(-EBUSY));
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001054 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001055
1056 /* disable TDLS sta queues on drain complete */
1057 if (sta->tdls) {
1058 mvm->tfd_drained[mvm_sta->sta_id] =
1059 mvm_sta->tfd_queue_msk;
1060 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
1061 mvm_sta->sta_id);
1062 }
1063
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001064 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001065 } else {
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001066 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001067
1068 if (sta->tdls)
1069 iwl_mvm_tdls_sta_deinit(mvm, sta);
1070
Johannes Berg8ca151b2013-01-24 14:25:36 +01001071 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
Monam Agarwalc531c772014-03-24 00:05:56 +05301072 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001073 }
1074
1075 return ret;
1076}
1077
1078int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1079 struct ieee80211_vif *vif,
1080 u8 sta_id)
1081{
1082 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1083
1084 lockdep_assert_held(&mvm->mutex);
1085
Monam Agarwalc531c772014-03-24 00:05:56 +05301086 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001087 return ret;
1088}
1089
/*
 * Allocate a driver-internal station (aux/sniffer/broadcast):
 * pick a free station index (unless recovering from a HW restart, where
 * the previous index is kept) and record its TFD queue mask.
 */
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
1106
Johannes Berg712b24a2014-08-04 14:14:14 +02001107static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
1108 struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001109{
Monam Agarwalc531c772014-03-24 00:05:56 +05301110 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001111 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1112 sta->sta_id = IWL_MVM_STATION_COUNT;
1113}
1114
1115static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1116 struct iwl_mvm_int_sta *sta,
1117 const u8 *addr,
1118 u16 mac_id, u16 color)
1119{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001120 struct iwl_mvm_add_sta_cmd cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001121 int ret;
1122 u32 status;
1123
1124 lockdep_assert_held(&mvm->mutex);
1125
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001126 memset(&cmd, 0, sizeof(cmd));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001127 cmd.sta_id = sta->sta_id;
1128 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1129 color));
1130
1131 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
Liad Kaufmancf0cda12015-09-24 10:44:12 +02001132 cmd.tid_disable_tx = cpu_to_le16(0xffff);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001133
1134 if (addr)
1135 memcpy(cmd.addr, addr, ETH_ALEN);
1136
Sara Sharon854c5702016-01-26 13:17:47 +02001137 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1138 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001139 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001140 if (ret)
1141 return ret;
1142
Sara Sharon837c4da2016-01-07 16:50:45 +02001143 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001144 case ADD_STA_SUCCESS:
1145 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1146 return 0;
1147 default:
1148 ret = -EIO;
1149 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1150 status);
1151 break;
1152 }
1153 return ret;
1154}
1155
1156int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1157{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001158 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1159 mvm->cfg->base_params->wd_timeout :
1160 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001161 int ret;
1162
1163 lockdep_assert_held(&mvm->mutex);
1164
Ariej Marjieh7da91b02014-07-07 12:09:40 +03001165 /* Map Aux queue to fifo - needs to happen before adding Aux station */
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001166 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
Liad Kaufman5c1156e2015-07-22 17:59:53 +03001167 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
Ariej Marjieh7da91b02014-07-07 12:09:40 +03001168
1169 /* Allocate aux station and assign to it the aux queue */
1170 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Eliad Pellerb92e6612014-01-23 17:58:23 +02001171 NL80211_IFTYPE_UNSPECIFIED);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001172 if (ret)
1173 return ret;
1174
1175 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1176 MAC_INDEX_AUX, 0);
1177
1178 if (ret)
1179 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1180 return ret;
1181}
1182
/*
 * Add the sniffer (monitor) station for @vif to the firmware, using the
 * interface's MAC address. mvm->snif_sta holds the station's local state.
 */
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}
1191
1192int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1193{
1194 int ret;
1195
1196 lockdep_assert_held(&mvm->mutex);
1197
1198 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1199 if (ret)
1200 IWL_WARN(mvm, "Failed sending remove station\n");
1201
1202 return ret;
1203}
1204
/* Free the sniffer station's local bookkeeping (FW removal is separate) */
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
1209
/* Free the aux station's local bookkeeping; caller holds mvm->mutex */
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
1216
Johannes Berg8ca151b2013-01-24 14:25:36 +01001217/*
1218 * Send the add station command for the vif's broadcast station.
1219 * Assumes that the station was already allocated.
1220 *
1221 * @mvm: the mvm component
1222 * @vif: the interface to which the broadcast station is added
1223 * @bsta: the broadcast station to add.
1224 */
Johannes Berg013290a2014-08-04 13:38:48 +02001225int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001226{
1227 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001228 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001229 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001230 const u8 *baddr = _baddr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001231
1232 lockdep_assert_held(&mvm->mutex);
1233
Liad Kaufmande24f632015-08-04 15:19:18 +03001234 if (iwl_mvm_is_dqa_supported(mvm)) {
1235 struct iwl_trans_txq_scd_cfg cfg = {
1236 .fifo = IWL_MVM_TX_FIFO_VO,
1237 .sta_id = mvmvif->bcast_sta.sta_id,
1238 .tid = IWL_MAX_TID_COUNT,
1239 .aggregate = false,
1240 .frame_limit = IWL_FRAME_LIMIT,
1241 };
1242 unsigned int wdg_timeout =
1243 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1244 int queue;
1245
1246 if ((vif->type == NL80211_IFTYPE_AP) &&
1247 (mvmvif->bcast_sta.tfd_queue_msk &
1248 BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
1249 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
Liad Kaufman4c965132015-08-09 19:26:56 +03001250 else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
1251 (mvmvif->bcast_sta.tfd_queue_msk &
1252 BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
1253 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
Liad Kaufmande24f632015-08-04 15:19:18 +03001254 else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
1255 return -EINVAL;
1256
1257 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
1258 wdg_timeout);
1259 }
1260
Johannes Berg5023d962013-07-31 14:07:43 +02001261 if (vif->type == NL80211_IFTYPE_ADHOC)
1262 baddr = vif->bss_conf.bssid;
1263
Johannes Berg8ca151b2013-01-24 14:25:36 +01001264 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
1265 return -ENOSPC;
1266
1267 return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1268 mvmvif->id, mvmvif->color);
1269}
1270
1271/* Send the FW a request to remove the station from it's internal data
1272 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02001273int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001274{
Johannes Berg013290a2014-08-04 13:38:48 +02001275 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001276 int ret;
1277
1278 lockdep_assert_held(&mvm->mutex);
1279
Johannes Berg013290a2014-08-04 13:38:48 +02001280 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001281 if (ret)
1282 IWL_WARN(mvm, "Failed sending remove station\n");
1283 return ret;
1284}
1285
Johannes Berg013290a2014-08-04 13:38:48 +02001286int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1287{
1288 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Liad Kaufmande24f632015-08-04 15:19:18 +03001289 u32 qmask = 0;
Johannes Berg013290a2014-08-04 13:38:48 +02001290
1291 lockdep_assert_held(&mvm->mutex);
1292
Liad Kaufmande24f632015-08-04 15:19:18 +03001293 if (!iwl_mvm_is_dqa_supported(mvm))
1294 qmask = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001295
Liad Kaufmande24f632015-08-04 15:19:18 +03001296 if (vif->type == NL80211_IFTYPE_AP) {
1297 /*
1298 * The firmware defines the TFD queue mask to only be relevant
1299 * for *unicast* queues, so the multicast (CAB) queue shouldn't
1300 * be included.
1301 */
Johannes Berg013290a2014-08-04 13:38:48 +02001302 qmask &= ~BIT(vif->cab_queue);
1303
Liad Kaufmande24f632015-08-04 15:19:18 +03001304 if (iwl_mvm_is_dqa_supported(mvm))
1305 qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
Liad Kaufman4c965132015-08-09 19:26:56 +03001306 } else if (iwl_mvm_is_dqa_supported(mvm) &&
1307 vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1308 qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
Liad Kaufmande24f632015-08-04 15:19:18 +03001309 }
1310
Johannes Berg013290a2014-08-04 13:38:48 +02001311 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
1312 ieee80211_vif_type_p2p(vif));
1313}
1314
Johannes Berg8ca151b2013-01-24 14:25:36 +01001315/* Allocate a new station entry for the broadcast station to the given vif,
1316 * and send it to the FW.
1317 * Note that each P2P mac should have its own broadcast station.
1318 *
1319 * @mvm: the mvm component
1320 * @vif: the interface to which the broadcast station is added
1321 * @bsta: the broadcast station to add. */
Johannes Berg013290a2014-08-04 13:38:48 +02001322int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001323{
1324 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001325 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001326 int ret;
1327
1328 lockdep_assert_held(&mvm->mutex);
1329
Johannes Berg013290a2014-08-04 13:38:48 +02001330 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001331 if (ret)
1332 return ret;
1333
Johannes Berg013290a2014-08-04 13:38:48 +02001334 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001335
1336 if (ret)
1337 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02001338
Johannes Berg8ca151b2013-01-24 14:25:36 +01001339 return ret;
1340}
1341
/* Free the local bookkeeping of @vif's broadcast station */
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
1348
Johannes Berg8ca151b2013-01-24 14:25:36 +01001349/*
1350 * Send the FW a request to remove the station from it's internal data
1351 * structures, and in addition remove it from the local data structure.
1352 */
Johannes Berg013290a2014-08-04 13:38:48 +02001353int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001354{
1355 int ret;
1356
1357 lockdep_assert_held(&mvm->mutex);
1358
Johannes Berg013290a2014-08-04 13:38:48 +02001359 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001360
Johannes Berg013290a2014-08-04 13:38:48 +02001361 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1362
Johannes Berg8ca151b2013-01-24 14:25:36 +01001363 return ret;
1364}
1365
/* Maximum number of concurrent RX BlockAck sessions the driver accepts */
#define IWL_MAX_RX_BA_SESSIONS 16
1367
Sara Sharonb915c102016-03-23 16:32:02 +02001368static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02001369{
Sara Sharonb915c102016-03-23 16:32:02 +02001370 struct iwl_mvm_delba_notif notif = {
1371 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
1372 .metadata.sync = 1,
1373 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02001374 };
Sara Sharonb915c102016-03-23 16:32:02 +02001375 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
1376};
Sara Sharon10b2b202016-03-20 16:23:41 +02001377
/*
 * Tear down a BA session's per-RX-queue reorder buffers: first sync all
 * RX queues via a delBA notification, then purge any frames that are
 * (unexpectedly) still stored and disarm the reorder timers.
 */
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	/* after this returns, no RX queue is still handling this BAID */
	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
1418
/*
 * Initialize one reorder buffer per RX queue for a new BA session:
 * starting sequence number, window size, per-buffer lock, expiry timer
 * and the frame queues (one skb queue per slot in the BA window).
 */
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}
1447
/*
 * Start or stop an RX BlockAck session for (@sta, @tid) in the firmware.
 *
 * On start (with the new RX API) a BAID data structure with per-queue
 * reorder buffers is allocated up front, filled in from the BAID the
 * firmware returns, and published under RCU in mvm->baid_map. On stop,
 * the reorder buffers are flushed (after syncing all RX queues), the
 * session timer is disarmed and the BAID data is freed via RCU.
 *
 * @ssn: starting sequence number of the BA window
 * @buf_size: BA window size
 * @timeout: session timeout in TUs; 0 disables the expiry timer
 */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
			       start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		/* old RX API: no driver-side reorder state to set up */
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		/* the firmware chose the BAID; extract it from the status */
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		init_timer(&baid_data->session_timer);
		baid_data->session_timer.function =
			iwl_mvm_rx_agg_session_expired;
		baid_data->session_timer.data =
			(unsigned long)&mvm->baid_map[baid];
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else if (mvm->rx_ba_sessions > 0) {
		u8 baid = mvm_sta->tid_to_baid[tid];

		/* check that restart flow didn't zero the counter */
		mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
1584
Liad Kaufman9794c642015-08-19 17:34:28 +03001585int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1586 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001587{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001588 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001589 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001590 int ret;
1591 u32 status;
1592
1593 lockdep_assert_held(&mvm->mutex);
1594
1595 if (start) {
1596 mvm_sta->tfd_queue_msk |= BIT(queue);
1597 mvm_sta->tid_disable_agg &= ~BIT(tid);
1598 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03001599 /* In DQA-mode the queue isn't removed on agg termination */
1600 if (!iwl_mvm_is_dqa_supported(mvm))
1601 mvm_sta->tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001602 mvm_sta->tid_disable_agg |= BIT(tid);
1603 }
1604
1605 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
1606 cmd.sta_id = mvm_sta->sta_id;
1607 cmd.add_modify = STA_MODE_MODIFY;
1608 cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
1609 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
1610 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
1611
1612 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001613 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1614 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001615 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001616 if (ret)
1617 return ret;
1618
Sara Sharon837c4da2016-01-07 16:50:45 +02001619 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001620 case ADD_STA_SUCCESS:
1621 break;
1622 default:
1623 ret = -EIO;
1624 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
1625 start ? "start" : "stopp", status);
1626 break;
1627 }
1628
1629 return ret;
1630}
1631
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02001632const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001633 IEEE80211_AC_BE,
1634 IEEE80211_AC_BK,
1635 IEEE80211_AC_BK,
1636 IEEE80211_AC_BE,
1637 IEEE80211_AC_VI,
1638 IEEE80211_AC_VI,
1639 IEEE80211_AC_VO,
1640 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03001641 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001642};
1643
Johannes Berg3e56ead2013-02-15 22:23:18 +01001644static const u8 tid_to_ucode_ac[] = {
1645 AC_BE,
1646 AC_BK,
1647 AC_BK,
1648 AC_BE,
1649 AC_VI,
1650 AC_VI,
1651 AC_VO,
1652 AC_VO,
1653};
1654
Johannes Berg8ca151b2013-01-24 14:25:36 +01001655int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1656 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
1657{
Johannes Berg5b577a92013-11-14 18:20:04 +01001658 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001659 struct iwl_mvm_tid_data *tid_data;
1660 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001661 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001662
1663 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
1664 return -EINVAL;
1665
1666 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
1667 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
1668 mvmsta->tid_data[tid].state);
1669 return -ENXIO;
1670 }
1671
1672 lockdep_assert_held(&mvm->mutex);
1673
Arik Nemtsovb2492502014-03-13 12:21:50 +02001674 spin_lock_bh(&mvmsta->lock);
1675
1676 /* possible race condition - we entered D0i3 while starting agg */
1677 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
1678 spin_unlock_bh(&mvmsta->lock);
1679 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
1680 return -EIO;
1681 }
1682
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001683 spin_lock_bh(&mvm->queue_info_lock);
1684
Liad Kaufmancf961e12015-08-13 19:16:08 +03001685 /*
1686 * Note the possible cases:
1687 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
1688 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
1689 * one and mark it as reserved
1690 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
1691 * non-DQA mode, since the TXQ hasn't yet been allocated
1692 */
1693 txq_id = mvmsta->tid_data[tid].txq_id;
1694 if (!iwl_mvm_is_dqa_supported(mvm) ||
1695 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
Liad Kaufman9794c642015-08-19 17:34:28 +03001696 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1697 mvm->first_agg_queue,
Liad Kaufmancf961e12015-08-13 19:16:08 +03001698 mvm->last_agg_queue);
1699 if (txq_id < 0) {
1700 ret = txq_id;
1701 spin_unlock_bh(&mvm->queue_info_lock);
1702 IWL_ERR(mvm, "Failed to allocate agg queue\n");
1703 goto release_locks;
1704 }
1705
1706 /* TXQ hasn't yet been enabled, so mark it only as reserved */
1707 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001708 }
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001709 spin_unlock_bh(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001710
Liad Kaufmancf961e12015-08-13 19:16:08 +03001711 IWL_DEBUG_TX_QUEUES(mvm,
1712 "AGG for tid %d will be on queue #%d\n",
1713 tid, txq_id);
1714
Johannes Berg8ca151b2013-01-24 14:25:36 +01001715 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01001716 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001717 tid_data->txq_id = txq_id;
1718 *ssn = tid_data->ssn;
1719
1720 IWL_DEBUG_TX_QUEUES(mvm,
1721 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
1722 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
1723 tid_data->next_reclaimed);
1724
1725 if (tid_data->ssn == tid_data->next_reclaimed) {
1726 tid_data->state = IWL_AGG_STARTING;
1727 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1728 } else {
1729 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1730 }
1731
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001732 ret = 0;
1733
1734release_locks:
Johannes Berg8ca151b2013-01-24 14:25:36 +01001735 spin_unlock_bh(&mvmsta->lock);
1736
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001737 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001738}
1739
/*
 * Move a Tx aggregation session for @tid to the operational state.
 *
 * Called after the ADDBA handshake succeeded.  Marks the TID as
 * aggregating (IWL_AGG_ON), (re)configures or enables the hardware
 * queue with aggregation parameters, tells the firmware via
 * iwl_mvm_sta_tx_agg(), and updates the rate-scaling aggregation
 * frame limit.
 *
 * @buf_size: peer's reorder-buffer size (clamped below to
 *	LINK_QUAL_AGG_FRAME_LIMIT_DEF)
 * @amsdu: whether A-MSDU inside A-MPDU is permitted
 *
 * Returns 0/negative error; the final return value is that of
 * iwl_mvm_send_lq_cmd().
 */
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	u16 ssn;

	/*
	 * NOTE(review): .frame_limit is captured here from the caller's
	 * buf_size, i.e. before the clamp to LINK_QUAL_AGG_FRAME_LIMIT_DEF
	 * below - confirm the SCD config is meant to see the unclamped value.
	 */
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/* agg_tids is a per-TID bitmap; it must hold IWL_MAX_TID_COUNT bits */
	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;
		spin_unlock_bh(&mvm->queue_info_lock);

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	/* push the updated aggregation limit to the rate-scaling fw state */
	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
1841
/*
 * Stop the Tx aggregation session for @tid on @sta.
 *
 * During HW restart mac80211's state is already cleared, so we only
 * acknowledge the stop.  Otherwise, the action depends on the session
 * state:
 *  - IWL_AGG_ON with frames still in flight: switch to
 *    IWL_EMPTYING_HW_QUEUE_DELBA and let the reclaim path finish;
 *  - IWL_AGG_ON fully drained: notify mac80211, tell the fw, and in
 *    non-DQA mode disable the queue.  Note this path returns with
 *    mvmsta->lock already dropped;
 *  - session never became operational (STARTING / EMPTYING_ADDBA):
 *    just roll the state back to IWL_AGG_OFF;
 *  - any other state: -EINVAL.
 */
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;


	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		/* lock must be dropped before the irqsafe callback below */
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		/* early return: mvmsta->lock was already released above */
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
1937
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02001938int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1939 struct ieee80211_sta *sta, u16 tid)
1940{
Johannes Berg5b577a92013-11-14 18:20:04 +01001941 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02001942 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1943 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02001944 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02001945
1946 /*
1947 * First set the agg state to OFF to avoid calling
1948 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
1949 */
1950 spin_lock_bh(&mvmsta->lock);
1951 txq_id = tid_data->txq_id;
1952 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
1953 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02001954 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02001955 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03001956 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02001957 spin_unlock_bh(&mvmsta->lock);
1958
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001959 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03001960 /*
1961 * The TXQ is marked as reserved only if no traffic came through yet
1962 * This means no traffic has been sent on this TID (agg'd or not), so
1963 * we no longer have use for the queue. Since it hasn't even been
1964 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
1965 * free.
1966 */
1967 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
1968 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03001969 spin_unlock_bh(&mvm->queue_info_lock);
1970
Johannes Bergb6658ff2013-07-24 13:55:51 +02001971 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001972 iwl_mvm_drain_sta(mvm, mvmsta, true);
Luca Coelho5888a402015-10-06 09:54:57 +03001973 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
Johannes Bergb6658ff2013-07-24 13:55:51 +02001974 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001975 iwl_trans_wait_tx_queue_empty(mvm->trans,
1976 mvmsta->tfd_queue_msk);
1977 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02001978
Johannes Bergf7f89e72014-08-05 15:24:44 +02001979 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1980
Liad Kaufmancf961e12015-08-13 19:16:08 +03001981 if (!iwl_mvm_is_dqa_supported(mvm)) {
1982 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
1983
1984 iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
1985 tid, 0);
1986 }
Johannes Bergb6658ff2013-07-24 13:55:51 +02001987 }
1988
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02001989 return 0;
1990}
1991
Johannes Berg8ca151b2013-01-24 14:25:36 +01001992static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
1993{
Johannes Berg2dc2a152015-06-16 17:09:18 +02001994 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001995
1996 lockdep_assert_held(&mvm->mutex);
1997
Johannes Berg2dc2a152015-06-16 17:09:18 +02001998 /* Pick the unused key offset with the highest 'deleted'
1999 * counter. Every time a key is deleted, all the counters
2000 * are incremented and the one that was just deleted is
2001 * reset to zero. Thus, the highest counter is the one
2002 * that was deleted longest ago. Pick that one.
2003 */
2004 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2005 if (test_bit(i, mvm->fw_key_table))
2006 continue;
2007 if (mvm->fw_key_deleted[i] > max) {
2008 max = mvm->fw_key_deleted[i];
2009 max_offs = i;
2010 }
2011 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002012
Johannes Berg2dc2a152015-06-16 17:09:18 +02002013 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002014 return STA_KEY_IDX_INVALID;
2015
Johannes Berg2dc2a152015-06-16 17:09:18 +02002016 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002017}
2018
Johannes Berg5f7a1842015-12-11 09:36:10 +01002019static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2020 struct ieee80211_vif *vif,
2021 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002022{
Johannes Berg5b530e92014-12-23 16:00:17 +01002023 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002024
Johannes Berg5f7a1842015-12-11 09:36:10 +01002025 if (sta)
2026 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002027
2028 /*
2029 * The device expects GTKs for station interfaces to be
2030 * installed as GTKs for the AP station. If we have no
2031 * station ID, then use AP's station ID.
2032 */
2033 if (vif->type == NL80211_IFTYPE_STATION &&
Avri Altman9513c5e2015-10-19 16:29:11 +02002034 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2035 u8 sta_id = mvmvif->ap_sta_id;
2036
Avri Altman9513c5e2015-10-19 16:29:11 +02002037 /*
2038 * It is possible that the 'sta' parameter is NULL,
2039 * for example when a GTK is removed - the sta_id will then
2040 * be the AP ID, and no station was passed by mac80211.
2041 */
Sara Sharon13303c02016-04-10 15:51:54 +03002042 return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
Avri Altman9513c5e2015-10-19 16:29:11 +02002043 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002044
Johannes Berg5f7a1842015-12-11 09:36:10 +01002045 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002046}
2047
/*
 * Program one cipher key for a station into the firmware via the
 * ADD_STA_KEY command.
 *
 * @mcast: install as the group (multicast) key rather than pairwise
 * @tkip_iv32, @tkip_p1k: TKIP phase-1 receive key material (TKIP only;
 *	ignored for other ciphers)
 * @cmd_flags: CMD_ASYNC for fire-and-forget, 0 for a synchronous send
 * @key_offset: hw key-table slot to program
 *
 * Returns 0 on success, -EIO if the fw rejected the key, or the error
 * from sending the command.
 */
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	/* cipher-specific flags and key material layout */
	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		/* WEP key material starts at offset 3 in the cmd buffer */
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	/*
	 * In the async case the fw status is never filled in, so the
	 * pre-set ADD_STA_SUCCESS makes the check below pass.
	 */
	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
2118
2119static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
2120 struct ieee80211_key_conf *keyconf,
2121 u8 sta_id, bool remove_key)
2122{
2123 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
2124
2125 /* verify the key details match the required command's expectations */
2126 if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
2127 (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
2128 (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
2129 return -EINVAL;
2130
2131 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
2132 igtk_cmd.sta_id = cpu_to_le32(sta_id);
2133
2134 if (remove_key) {
2135 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
2136 } else {
2137 struct ieee80211_key_seq seq;
2138 const u8 *pn;
2139
2140 memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002141 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
2142 pn = seq.aes_cmac.pn;
2143 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
2144 ((u64) pn[4] << 8) |
2145 ((u64) pn[3] << 16) |
2146 ((u64) pn[2] << 24) |
2147 ((u64) pn[1] << 32) |
2148 ((u64) pn[0] << 40));
2149 }
2150
2151 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
2152 remove_key ? "removing" : "installing",
2153 igtk_cmd.sta_id);
2154
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002155 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01002156 sizeof(igtk_cmd), &igtk_cmd);
2157}
2158
2159
2160static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
2161 struct ieee80211_vif *vif,
2162 struct ieee80211_sta *sta)
2163{
Johannes Berg5b530e92014-12-23 16:00:17 +01002164 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002165
2166 if (sta)
2167 return sta->addr;
2168
2169 if (vif->type == NL80211_IFTYPE_STATION &&
2170 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2171 u8 sta_id = mvmvif->ap_sta_id;
2172 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
2173 lockdep_is_held(&mvm->mutex));
2174 return sta->addr;
2175 }
2176
2177
2178 return NULL;
2179}
2180
Johannes Berg2f6319d2014-11-12 23:39:56 +01002181static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
2182 struct ieee80211_vif *vif,
2183 struct ieee80211_sta *sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01002184 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002185 u8 key_offset,
Johannes Bergba3943b2014-11-12 23:54:48 +01002186 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002187{
Johannes Berg2f6319d2014-11-12 23:39:56 +01002188 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002189 int ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002190 const u8 *addr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002191 struct ieee80211_key_seq seq;
2192 u16 p1k[5];
2193
Johannes Berg8ca151b2013-01-24 14:25:36 +01002194 switch (keyconf->cipher) {
2195 case WLAN_CIPHER_SUITE_TKIP:
2196 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
2197 /* get phase 1 key from mac80211 */
2198 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
2199 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
Johannes Bergba3943b2014-11-12 23:54:48 +01002200 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002201 seq.tkip.iv32, p1k, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002202 break;
2203 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergba3943b2014-11-12 23:54:48 +01002204 case WLAN_CIPHER_SUITE_WEP40:
2205 case WLAN_CIPHER_SUITE_WEP104:
2206 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002207 0, NULL, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002208 break;
2209 default:
Johannes Bergba3943b2014-11-12 23:54:48 +01002210 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002211 0, NULL, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002212 }
2213
Johannes Berg8ca151b2013-01-24 14:25:36 +01002214 return ret;
2215}
2216
Johannes Berg2f6319d2014-11-12 23:39:56 +01002217static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
Johannes Bergba3943b2014-11-12 23:54:48 +01002218 struct ieee80211_key_conf *keyconf,
2219 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002220{
Max Stepanov5a258aa2013-04-07 09:11:21 +03002221 struct iwl_mvm_add_sta_key_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002222 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01002223 int ret;
2224 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002225
Emmanuel Grumbach8115efb2013-02-05 10:08:35 +02002226 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2227 STA_KEY_FLG_KEYID_MSK);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002228 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2229 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2230
Johannes Bergba3943b2014-11-12 23:54:48 +01002231 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002232 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2233
Max Stepanov5a258aa2013-04-07 09:11:21 +03002234 cmd.key_flags = key_flags;
2235 cmd.key_offset = keyconf->hw_key_idx;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002236 cmd.sta_id = sta_id;
2237
Johannes Berg8ca151b2013-01-24 14:25:36 +01002238 status = ADD_STA_SUCCESS;
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002239 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
2240 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002241
2242 switch (status) {
2243 case ADD_STA_SUCCESS:
2244 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2245 break;
2246 default:
2247 ret = -EIO;
2248 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2249 break;
2250 }
2251
2252 return ret;
2253}
2254
Johannes Berg2f6319d2014-11-12 23:39:56 +01002255int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
2256 struct ieee80211_vif *vif,
2257 struct ieee80211_sta *sta,
2258 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002259 u8 key_offset)
Johannes Berg2f6319d2014-11-12 23:39:56 +01002260{
Johannes Bergba3943b2014-11-12 23:54:48 +01002261 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01002262 struct iwl_mvm_sta *mvm_sta;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002263 u8 sta_id;
2264 int ret;
Matti Gottlieb11828db2015-06-01 15:15:11 +03002265 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
Johannes Berg2f6319d2014-11-12 23:39:56 +01002266
2267 lockdep_assert_held(&mvm->mutex);
2268
2269 /* Get the station id from the mvm local station table */
Johannes Berg5f7a1842015-12-11 09:36:10 +01002270 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
2271 if (!mvm_sta) {
2272 IWL_ERR(mvm, "Failed to find station\n");
Johannes Berg2f6319d2014-11-12 23:39:56 +01002273 return -EINVAL;
2274 }
Johannes Berg5f7a1842015-12-11 09:36:10 +01002275 sta_id = mvm_sta->sta_id;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002276
2277 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
2278 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
2279 goto end;
2280 }
2281
2282 /*
2283 * It is possible that the 'sta' parameter is NULL, and thus
2284 * there is a need to retrieve the sta from the local station table.
2285 */
2286 if (!sta) {
2287 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
2288 lockdep_is_held(&mvm->mutex));
2289 if (IS_ERR_OR_NULL(sta)) {
2290 IWL_ERR(mvm, "Invalid station id\n");
2291 return -EINVAL;
2292 }
2293 }
2294
2295 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
2296 return -EINVAL;
2297
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002298 /* If the key_offset is not pre-assigned, we need to find a
2299 * new offset to use. In normal cases, the offset is not
2300 * pre-assigned, but during HW_RESTART we want to reuse the
2301 * same indices, so we pass them when this function is called.
2302 *
2303 * In D3 entry, we need to hardcoded the indices (because the
2304 * firmware hardcodes the PTK offset to 0). In this case, we
2305 * need to make sure we don't overwrite the hw_key_idx in the
2306 * keyconf structure, because otherwise we cannot configure
2307 * the original ones back when resuming.
2308 */
2309 if (key_offset == STA_KEY_IDX_INVALID) {
2310 key_offset = iwl_mvm_set_fw_key_idx(mvm);
2311 if (key_offset == STA_KEY_IDX_INVALID)
Johannes Berg2f6319d2014-11-12 23:39:56 +01002312 return -ENOSPC;
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002313 keyconf->hw_key_idx = key_offset;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002314 }
2315
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002316 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02002317 if (ret)
Johannes Bergba3943b2014-11-12 23:54:48 +01002318 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01002319
2320 /*
2321 * For WEP, the same key is used for multicast and unicast. Upload it
2322 * again, using the same key offset, and now pointing the other one
2323 * to the same key slot (offset).
2324 * If this fails, remove the original as well.
2325 */
2326 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2327 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002328 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
2329 key_offset, !mcast);
Johannes Bergba3943b2014-11-12 23:54:48 +01002330 if (ret) {
Johannes Bergba3943b2014-11-12 23:54:48 +01002331 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02002332 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01002333 }
2334 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01002335
Luca Coelho9c3deeb2015-11-11 01:06:17 +02002336 __set_bit(key_offset, mvm->fw_key_table);
2337
Johannes Berg2f6319d2014-11-12 23:39:56 +01002338end:
2339 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
2340 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
Matti Gottlieb11828db2015-06-01 15:15:11 +03002341 sta ? sta->addr : zero_addr, ret);
Johannes Berg2f6319d2014-11-12 23:39:56 +01002342 return ret;
2343}
2344
2345int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
2346 struct ieee80211_vif *vif,
2347 struct ieee80211_sta *sta,
2348 struct ieee80211_key_conf *keyconf)
2349{
Johannes Bergba3943b2014-11-12 23:54:48 +01002350 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01002351 struct iwl_mvm_sta *mvm_sta;
2352 u8 sta_id = IWL_MVM_STATION_COUNT;
Johannes Berg2dc2a152015-06-16 17:09:18 +02002353 int ret, i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002354
2355 lockdep_assert_held(&mvm->mutex);
2356
Johannes Berg5f7a1842015-12-11 09:36:10 +01002357 /* Get the station from the mvm local station table */
2358 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
Johannes Berg2f6319d2014-11-12 23:39:56 +01002359
2360 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
2361 keyconf->keyidx, sta_id);
2362
2363 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
2364 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
2365
2366 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
2367 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
2368 keyconf->hw_key_idx);
2369 return -ENOENT;
2370 }
2371
Johannes Berg2dc2a152015-06-16 17:09:18 +02002372 /* track which key was deleted last */
2373 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2374 if (mvm->fw_key_deleted[i] < U8_MAX)
2375 mvm->fw_key_deleted[i]++;
2376 }
2377 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
2378
Johannes Berg5f7a1842015-12-11 09:36:10 +01002379 if (!mvm_sta) {
Johannes Berg2f6319d2014-11-12 23:39:56 +01002380 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
2381 return 0;
2382 }
2383
Johannes Berg5f7a1842015-12-11 09:36:10 +01002384 sta_id = mvm_sta->sta_id;
2385
Johannes Bergba3943b2014-11-12 23:54:48 +01002386 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
2387 if (ret)
2388 return ret;
2389
2390 /* delete WEP key twice to get rid of (now useless) offset */
2391 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2392 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
2393 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
2394
2395 return ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002396}
2397
Johannes Berg8ca151b2013-01-24 14:25:36 +01002398void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
2399 struct ieee80211_vif *vif,
2400 struct ieee80211_key_conf *keyconf,
2401 struct ieee80211_sta *sta, u32 iv32,
2402 u16 *phase1key)
2403{
Beni Levc3eb5362013-02-06 17:22:18 +02002404 struct iwl_mvm_sta *mvm_sta;
Johannes Bergba3943b2014-11-12 23:54:48 +01002405 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002406
Beni Levc3eb5362013-02-06 17:22:18 +02002407 rcu_read_lock();
2408
Johannes Berg5f7a1842015-12-11 09:36:10 +01002409 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
2410 if (WARN_ON_ONCE(!mvm_sta))
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02002411 goto unlock;
Johannes Bergba3943b2014-11-12 23:54:48 +01002412 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002413 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02002414
2415 unlock:
Beni Levc3eb5362013-02-06 17:22:18 +02002416 rcu_read_unlock();
Johannes Berg8ca151b2013-01-24 14:25:36 +01002417}
2418
Johannes Berg9cc40712013-02-15 22:47:48 +01002419void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
2420 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002421{
Johannes Berg5b577a92013-11-14 18:20:04 +01002422 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002423 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002424 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01002425 .sta_id = mvmsta->sta_id,
Emmanuel Grumbach5af01772013-06-09 12:59:24 +03002426 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
Johannes Berg9cc40712013-02-15 22:47:48 +01002427 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01002428 };
2429 int ret;
2430
Sara Sharon854c5702016-01-26 13:17:47 +02002431 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
2432 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002433 if (ret)
2434 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
2435}
2436
Johannes Berg9cc40712013-02-15 22:47:48 +01002437void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
2438 struct ieee80211_sta *sta,
Johannes Berg8ca151b2013-01-24 14:25:36 +01002439 enum ieee80211_frame_release_type reason,
Johannes Berg3e56ead2013-02-15 22:23:18 +01002440 u16 cnt, u16 tids, bool more_data,
2441 bool agg)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002442{
Johannes Berg5b577a92013-11-14 18:20:04 +01002443 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002444 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002445 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01002446 .sta_id = mvmsta->sta_id,
Johannes Berg8ca151b2013-01-24 14:25:36 +01002447 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
2448 .sleep_tx_count = cpu_to_le16(cnt),
Johannes Berg9cc40712013-02-15 22:47:48 +01002449 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01002450 };
Johannes Berg3e56ead2013-02-15 22:23:18 +01002451 int tid, ret;
2452 unsigned long _tids = tids;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002453
Johannes Berg3e56ead2013-02-15 22:23:18 +01002454 /* convert TIDs to ACs - we don't support TSPEC so that's OK
2455 * Note that this field is reserved and unused by firmware not
2456 * supporting GO uAPSD, so it's safe to always do this.
2457 */
2458 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
2459 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
2460
2461 /* If we're releasing frames from aggregation queues then check if the
2462 * all queues combined that we're releasing frames from have
2463 * - more frames than the service period, in which case more_data
2464 * needs to be set
2465 * - fewer than 'cnt' frames, in which case we need to adjust the
2466 * firmware command (but do that unconditionally)
2467 */
2468 if (agg) {
2469 int remaining = cnt;
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02002470 int sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01002471
2472 spin_lock_bh(&mvmsta->lock);
2473 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
2474 struct iwl_mvm_tid_data *tid_data;
2475 u16 n_queued;
2476
2477 tid_data = &mvmsta->tid_data[tid];
2478 if (WARN(tid_data->state != IWL_AGG_ON &&
2479 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
2480 "TID %d state is %d\n",
2481 tid, tid_data->state)) {
2482 spin_unlock_bh(&mvmsta->lock);
2483 ieee80211_sta_eosp(sta);
2484 return;
2485 }
2486
2487 n_queued = iwl_mvm_tid_queued(tid_data);
2488 if (n_queued > remaining) {
2489 more_data = true;
2490 remaining = 0;
2491 break;
2492 }
2493 remaining -= n_queued;
2494 }
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02002495 sleep_tx_count = cnt - remaining;
2496 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
2497 mvmsta->sleep_tx_count = sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01002498 spin_unlock_bh(&mvmsta->lock);
2499
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02002500 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
Johannes Berg3e56ead2013-02-15 22:23:18 +01002501 if (WARN_ON(cnt - remaining == 0)) {
2502 ieee80211_sta_eosp(sta);
2503 return;
2504 }
2505 }
2506
2507 /* Note: this is ignored by firmware not supporting GO uAPSD */
2508 if (more_data)
2509 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
2510
2511 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
2512 mvmsta->next_status_eosp = true;
2513 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
2514 } else {
2515 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
2516 }
2517
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +02002518 /* block the Tx queues until the FW updated the sleep Tx count */
2519 iwl_trans_block_txq_ptrs(mvm->trans, true);
2520
2521 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
2522 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
Sara Sharon854c5702016-01-26 13:17:47 +02002523 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002524 if (ret)
2525 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
2526}
Johannes Berg3e56ead2013-02-15 22:23:18 +01002527
Johannes Berg04168412015-06-23 21:22:09 +02002528void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
2529 struct iwl_rx_cmd_buffer *rxb)
Johannes Berg3e56ead2013-02-15 22:23:18 +01002530{
2531 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2532 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
2533 struct ieee80211_sta *sta;
2534 u32 sta_id = le32_to_cpu(notif->sta_id);
2535
2536 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
Johannes Berg04168412015-06-23 21:22:09 +02002537 return;
Johannes Berg3e56ead2013-02-15 22:23:18 +01002538
2539 rcu_read_lock();
2540 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
2541 if (!IS_ERR_OR_NULL(sta))
2542 ieee80211_sta_eosp(sta);
2543 rcu_read_unlock();
Johannes Berg3e56ead2013-02-15 22:23:18 +01002544}
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03002545
2546void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
2547 struct iwl_mvm_sta *mvmsta, bool disable)
2548{
2549 struct iwl_mvm_add_sta_cmd cmd = {
2550 .add_modify = STA_MODE_MODIFY,
2551 .sta_id = mvmsta->sta_id,
2552 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
2553 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
2554 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
2555 };
2556 int ret;
2557
Sara Sharon854c5702016-01-26 13:17:47 +02002558 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
2559 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03002560 if (ret)
2561 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
2562}
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03002563
2564void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
2565 struct ieee80211_sta *sta,
2566 bool disable)
2567{
2568 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2569
2570 spin_lock_bh(&mvm_sta->lock);
2571
2572 if (mvm_sta->disable_tx == disable) {
2573 spin_unlock_bh(&mvm_sta->lock);
2574 return;
2575 }
2576
2577 mvm_sta->disable_tx = disable;
2578
2579 /*
Sara Sharon0d365ae2015-03-31 12:24:05 +03002580 * Tell mac80211 to start/stop queuing tx for this station,
2581 * but don't stop queuing if there are still pending frames
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03002582 * for this station.
2583 */
2584 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
2585 ieee80211_sta_block_awake(mvm->hw, sta, disable);
2586
2587 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
2588
2589 spin_unlock_bh(&mvm_sta->lock);
2590}
2591
2592void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
2593 struct iwl_mvm_vif *mvmvif,
2594 bool disable)
2595{
2596 struct ieee80211_sta *sta;
2597 struct iwl_mvm_sta *mvm_sta;
2598 int i;
2599
2600 lockdep_assert_held(&mvm->mutex);
2601
2602 /* Block/unblock all the stations of the given mvmvif */
2603 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
2604 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
2605 lockdep_is_held(&mvm->mutex));
2606 if (IS_ERR_OR_NULL(sta))
2607 continue;
2608
2609 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2610 if (mvm_sta->mac_id_n_color !=
2611 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
2612 continue;
2613
2614 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
2615 }
2616}
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02002617
2618void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2619{
2620 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2621 struct iwl_mvm_sta *mvmsta;
2622
2623 rcu_read_lock();
2624
2625 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
2626
2627 if (!WARN_ON(!mvmsta))
2628 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
2629
2630 rcu_read_unlock();
2631}