Johannes Berg8ca151b2013-01-24 14:25:36 +01001/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
Emmanuel Grumbach51368bf2013-12-30 13:15:54 +02008 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
Johannes Berg8b4139d2014-07-24 14:05:26 +02009 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010010 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
Emmanuel Grumbach410dc5a2013-02-18 09:22:28 +020026 * in the file called COPYING.
Johannes Berg8ca151b2013-01-24 14:25:36 +010027 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
Emmanuel Grumbach51368bf2013-12-30 13:15:54 +020034 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
Johannes Berg8b4139d2014-07-24 14:05:26 +020035 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010036 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/kernel.h>
66#include <linux/slab.h>
67#include <linux/skbuff.h>
68#include <linux/netdevice.h>
69#include <linux/etherdevice.h>
Johannes Bergf0c26462013-01-22 20:41:58 +010070#include <linux/ip.h>
Eliad Peller2ee8f022014-01-13 19:07:09 +020071#include <linux/if_arp.h>
Johannes Bergaadede62014-10-09 17:01:36 +020072#include <linux/devcoredump.h>
Johannes Berg8ca151b2013-01-24 14:25:36 +010073#include <net/mac80211.h>
Emmanuel Grumbach7b1dd042014-02-04 15:32:43 +020074#include <net/ieee80211_radiotap.h>
Johannes Bergf0c26462013-01-22 20:41:58 +010075#include <net/tcp.h>
Johannes Berg8ca151b2013-01-24 14:25:36 +010076
77#include "iwl-op-mode.h"
78#include "iwl-io.h"
79#include "mvm.h"
80#include "sta.h"
81#include "time-event.h"
82#include "iwl-eeprom-parse.h"
Johannes Berg8ca151b2013-01-24 14:25:36 +010083#include "iwl-phy-db.h"
David Spinadel507cadf2013-07-31 18:07:21 +030084#include "testmode.h"
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +030085#include "iwl-fw-error-dump.h"
86#include "iwl-prph.h"
Eran Harary363039b2014-12-02 15:19:22 +020087#include "iwl-csr.h"
Arik Nemtsov88931cc2014-03-05 12:26:15 +020088#include "iwl-nvm-parse.h"
Johannes Berg8ca151b2013-01-24 14:25:36 +010089
90static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
91 {
92 .max = 1,
Ilan Peer8eb38712013-06-01 20:17:18 +030093 .types = BIT(NL80211_IFTYPE_STATION),
Johannes Berg8ca151b2013-01-24 14:25:36 +010094 },
Johannes Berg3c15a0f2013-05-31 10:17:19 +020095 {
96 .max = 1,
Ilan Peer8eb38712013-06-01 20:17:18 +030097 .types = BIT(NL80211_IFTYPE_AP) |
98 BIT(NL80211_IFTYPE_P2P_CLIENT) |
Johannes Berg3c15a0f2013-05-31 10:17:19 +020099 BIT(NL80211_IFTYPE_P2P_GO),
100 },
101 {
102 .max = 1,
103 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
104 },
Johannes Berg8ca151b2013-01-24 14:25:36 +0100105};
106
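/*
 * Note: together with iwl_mvm_limits above, this advertises at most one
 * station vif, one AP/P2P vif and one P2P Device, on up to two different
 * channels at the same time.
 */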
107static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
108 {
Emmanuel Grumbach2624a5c2014-12-16 13:02:03 +0200109 .num_different_channels = 2,
Johannes Berg8ca151b2013-01-24 14:25:36 +0100110 .max_interfaces = 3,
111 .limits = iwl_mvm_limits,
112 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
113 },
114};
115
Johannes Bergf0c26462013-01-22 20:41:58 +0100116#ifdef CONFIG_PM_SLEEP
117static const struct nl80211_wowlan_tcp_data_token_feature
118iwl_mvm_wowlan_tcp_token_feature = {
119 .min_len = 0,
120 .max_len = 255,
121 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
122};
123
124static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
125 .tok = &iwl_mvm_wowlan_tcp_token_feature,
126 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
127 sizeof(struct ethhdr) -
128 sizeof(struct iphdr) -
129 sizeof(struct tcphdr),
130 .data_interval_max = 65535, /* __le16 in API */
131 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
132 sizeof(struct ethhdr) -
133 sizeof(struct iphdr) -
134 sizeof(struct tcphdr),
135 .seq = true,
136};
137#endif
138
Eliad Peller77736922014-01-14 12:35:49 +0200139#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
Eliad Peller2ee8f022014-01-13 19:07:09 +0200140/*
141 * Use the reserved field to indicate magic values.
142 * These values are only used internally by the driver
143 * and won't make it to the fw (reserved will be 0).
144 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
145 * be the vif's IP address. In case there is not exactly one
146 * IP address (0, or more than 1), this attribute will
147 * be skipped.
148 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
149 * the LSB bytes of the vif's MAC address.
150 */
151enum {
152 BC_FILTER_MAGIC_NONE = 0,
153 BC_FILTER_MAGIC_IP,
154 BC_FILTER_MAGIC_MAC,
155};
156
Eliad Peller77736922014-01-14 12:35:49 +0200157static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
158 {
159 /* arp */
160 .discard = 0,
161 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
162 .attrs = {
163 {
164 /* frame type - arp, hw type - ethernet */
165 .offset_type =
166 BCAST_FILTER_OFFSET_PAYLOAD_START,
167 .offset = sizeof(rfc1042_header),
168 .val = cpu_to_be32(0x08060001),
169 .mask = cpu_to_be32(0xffffffff),
170 },
Eliad Peller2ee8f022014-01-13 19:07:09 +0200171 {
172 /* arp dest ip */
173 .offset_type =
174 BCAST_FILTER_OFFSET_PAYLOAD_START,
175 .offset = sizeof(rfc1042_header) + 2 +
176 sizeof(struct arphdr) +
177 ETH_ALEN + sizeof(__be32) +
178 ETH_ALEN,
179 .mask = cpu_to_be32(0xffffffff),
180 /* mark it as special field */
181 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
182 },
183 },
184 },
185 {
186 /* dhcp offer bcast */
187 .discard = 0,
188 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
189 .attrs = {
190 {
191 /* udp dest port - 68 (bootp client)*/
192 .offset_type = BCAST_FILTER_OFFSET_IP_END,
193 .offset = offsetof(struct udphdr, dest),
194 .val = cpu_to_be32(0x00440000),
195 .mask = cpu_to_be32(0xffff0000),
196 },
197 {
198 /* dhcp - lsb bytes of client hw address */
199 .offset_type = BCAST_FILTER_OFFSET_IP_END,
200 .offset = 38,
201 .mask = cpu_to_be32(0xffffffff),
202 /* mark it as special field */
203 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
204 },
Eliad Peller77736922014-01-14 12:35:49 +0200205 },
206 },
207 /* last filter must be empty */
208 {},
209};
210#endif
211
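/*
 * Runtime PM (d0i3) reference counting: each held iwl_mvm_ref_type keeps
 * the device out of d0i3. The ref/unref helpers below do nothing when
 * d0i3 is not supported.
 */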
Eliad Peller7498cf42014-01-16 17:10:44 +0200212void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
213{
Eliad Peller7bb426e2014-02-24 12:54:37 +0200214 if (!iwl_mvm_is_d0i3_supported(mvm))
Eliad Peller7498cf42014-01-16 17:10:44 +0200215 return;
216
217 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
Eliad Peller576eeee2014-07-01 18:38:38 +0300218 spin_lock_bh(&mvm->refs_lock);
219 mvm->refs[ref_type]++;
220 spin_unlock_bh(&mvm->refs_lock);
Eliad Peller7498cf42014-01-16 17:10:44 +0200221 iwl_trans_ref(mvm->trans);
222}
223
224void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
225{
Eliad Peller7bb426e2014-02-24 12:54:37 +0200226 if (!iwl_mvm_is_d0i3_supported(mvm))
Eliad Peller7498cf42014-01-16 17:10:44 +0200227 return;
228
229 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
Eliad Peller576eeee2014-07-01 18:38:38 +0300230 spin_lock_bh(&mvm->refs_lock);
231 WARN_ON(!mvm->refs[ref_type]--);
232 spin_unlock_bh(&mvm->refs_lock);
Eliad Peller7498cf42014-01-16 17:10:44 +0200233 iwl_trans_unref(mvm->trans);
234}
235
Eliad Peller576eeee2014-07-01 18:38:38 +0300236static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
237 enum iwl_mvm_ref_type except_ref)
Eliad Peller7498cf42014-01-16 17:10:44 +0200238{
Eliad Peller576eeee2014-07-01 18:38:38 +0300239 int i, j;
Eliad Peller7498cf42014-01-16 17:10:44 +0200240
Eliad Peller7bb426e2014-02-24 12:54:37 +0200241 if (!iwl_mvm_is_d0i3_supported(mvm))
Eliad Peller7498cf42014-01-16 17:10:44 +0200242 return;
243
Eliad Peller576eeee2014-07-01 18:38:38 +0300244 spin_lock_bh(&mvm->refs_lock);
245 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
246 if (except_ref == i || !mvm->refs[i])
Eliad Peller7498cf42014-01-16 17:10:44 +0200247 continue;
248
Eliad Peller576eeee2014-07-01 18:38:38 +0300249 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
250 i, mvm->refs[i]);
251 for (j = 0; j < mvm->refs[i]; j++)
252 iwl_trans_unref(mvm->trans);
253 mvm->refs[i] = 0;
Eliad Peller7498cf42014-01-16 17:10:44 +0200254 }
Eliad Peller576eeee2014-07-01 18:38:38 +0300255 spin_unlock_bh(&mvm->refs_lock);
Eliad Peller7498cf42014-01-16 17:10:44 +0200256}
257
Eliad Pellerf4cf8682014-11-04 16:57:06 +0200258bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
259{
260 int i;
261 bool taken = false;
262
263 if (!iwl_mvm_is_d0i3_supported(mvm))
264 return true;
265
266 spin_lock_bh(&mvm->refs_lock);
267 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
268 if (mvm->refs[i]) {
269 taken = true;
270 break;
271 }
272 }
273 spin_unlock_bh(&mvm->refs_lock);
274
275 return taken;
276}
277
Eliad Peller576eeee2014-07-01 18:38:38 +0300278int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
Gregory Greenmand40fc482014-06-25 14:08:50 +0200279{
280 iwl_mvm_ref(mvm, ref_type);
281
282 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
283 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
284 HZ)) {
285 WARN_ON_ONCE(1);
286 iwl_mvm_unref(mvm, ref_type);
287 return -EIO;
288 }
289
290 return 0;
291}
292
Ilan Peerfe0f2de2013-03-21 10:23:52 +0200293static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
294{
295 int i;
296
297 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
298 for (i = 0; i < NUM_PHY_CTX; i++) {
299 mvm->phy_ctxts[i].id = i;
300 mvm->phy_ctxts[i].ref = 0;
301 }
302}
303
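/*
 * Query the firmware for an MCC (regulatory) update for the given country
 * code and source, and translate the response into an ieee80211_regdomain
 * for cfg80211. Must be called with mvm->mutex held.
 */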
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200304struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200305 const char *alpha2,
Jonathan Doron47c8b152014-11-27 16:55:25 +0200306 enum iwl_mcc_source src_id,
307 bool *changed)
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200308{
309 struct ieee80211_regdomain *regd = NULL;
310 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
311 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
312 struct iwl_mcc_update_resp *resp;
313
314 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
315
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200316 lockdep_assert_held(&mvm->mutex);
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200317
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200318 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200319 if (IS_ERR_OR_NULL(resp)) {
320 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
Nicholas Krauseb8c474d2015-05-23 20:53:21 -0400321 PTR_ERR_OR_ZERO(resp));
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200322 goto out;
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200323 }
324
Jonathan Doron47c8b152014-11-27 16:55:25 +0200325 if (changed)
326 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
327
Arik Nemtsov162ee3c2014-06-10 11:25:35 +0300328 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200329 __le32_to_cpu(resp->n_channels),
330 resp->channels,
331 __le16_to_cpu(resp->mcc));
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200332	/* Store the returned source id */
333 src_id = resp->source_id;
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200334 kfree(resp);
335 if (IS_ERR_OR_NULL(regd)) {
336		IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
Nicholas Krauseb8c474d2015-05-23 20:53:21 -0400337 PTR_ERR_OR_ZERO(regd));
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200338 goto out;
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200339 }
340
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200341 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
342 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200343 mvm->lar_regdom_set = true;
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200344 mvm->mcc_src = src_id;
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200345
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200346out:
Arik Nemtsov88931cc2014-03-05 12:26:15 +0200347 return regd;
348}
349
Jonathan Doron47c8b152014-11-27 16:55:25 +0200350void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
351{
352 bool changed;
353 struct ieee80211_regdomain *regd;
354
355 if (!iwl_mvm_is_lar_supported(mvm))
356 return;
357
358 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
359 if (!IS_ERR_OR_NULL(regd)) {
360 /* only update the regulatory core if changed */
361 if (changed)
362 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
363
364 kfree(regd);
365 }
366}
367
368struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
369 bool *changed)
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200370{
371 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
372 iwl_mvm_is_wifi_mcc_supported(mvm) ?
373 MCC_SOURCE_GET_CURRENT :
Jonathan Doron47c8b152014-11-27 16:55:25 +0200374 MCC_SOURCE_OLD_FW, changed);
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200375}
376
377int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
378{
379 enum iwl_mcc_source used_src;
380 struct ieee80211_regdomain *regd;
Arik Nemtsovb6e160a2015-03-23 14:32:53 +0200381 int ret;
382 bool changed;
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200383 const struct ieee80211_regdomain *r =
384 rtnl_dereference(mvm->hw->wiphy->regd);
385
386 if (!r)
Arik Nemtsovb6e160a2015-03-23 14:32:53 +0200387 return -ENOENT;
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200388
389 /* save the last source in case we overwrite it below */
390 used_src = mvm->mcc_src;
391 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
392 /* Notify the firmware we support wifi location updates */
Jonathan Doron47c8b152014-11-27 16:55:25 +0200393 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200394 if (!IS_ERR_OR_NULL(regd))
395 kfree(regd);
396 }
397
398 /* Now set our last stored MCC and source */
Arik Nemtsovb6e160a2015-03-23 14:32:53 +0200399 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
400 &changed);
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200401 if (IS_ERR_OR_NULL(regd))
402 return -EIO;
403
Arik Nemtsovb6e160a2015-03-23 14:32:53 +0200404 /* update cfg80211 if the regdomain was changed */
405 if (changed)
406 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
407 else
408 ret = 0;
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200409
Arik Nemtsovb6e160a2015-03-23 14:32:53 +0200410 kfree(regd);
411 return ret;
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200412}
413
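/*
 * Advertise the device/firmware capabilities (interface types, ciphers,
 * scan limits, WoWLAN, etc.) to mac80211 and register the hw.
 */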
Johannes Berg8ca151b2013-01-24 14:25:36 +0100414int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
415{
416 struct ieee80211_hw *hw = mvm->hw;
Ilan Peer831e85f2013-02-07 17:09:09 +0200417 int num_mac, ret, i;
Johannes Berg5f4c02e2015-05-20 16:51:28 +0200418 static const u32 mvm_ciphers[] = {
419 WLAN_CIPHER_SUITE_WEP40,
420 WLAN_CIPHER_SUITE_WEP104,
421 WLAN_CIPHER_SUITE_TKIP,
422 WLAN_CIPHER_SUITE_CCMP,
423 };
Johannes Berg8ca151b2013-01-24 14:25:36 +0100424
425 /* Tell mac80211 our characteristics */
Johannes Berg30686bf2015-06-02 21:39:54 +0200426 ieee80211_hw_set(hw, SIGNAL_DBM);
427 ieee80211_hw_set(hw, SPECTRUM_MGMT);
428 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
429 ieee80211_hw_set(hw, QUEUE_CONTROL);
430 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
431 ieee80211_hw_set(hw, SUPPORTS_PS);
432 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
433 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
434 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
435 ieee80211_hw_set(hw, CONNECTION_MONITOR);
436 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
437 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
438 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100439
Eytan Lifshitz19e737c2013-09-09 13:30:15 +0200440 hw->queues = mvm->first_agg_queue;
Ilan Peer398e8c62013-03-13 15:20:35 +0200441 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
Emmanuel Grumbach7b1dd042014-02-04 15:32:43 +0200442 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
443 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
Eyal Shapira339b3082014-11-18 16:43:55 +0200444 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
445 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100446 hw->rate_control_algorithm = "iwl-mvm-rs";
Johannes Berg848955c2014-11-11 12:48:42 +0100447 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
448 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100449
Johannes Berg5f4c02e2015-05-20 16:51:28 +0200450 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
451 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
452 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
453 hw->wiphy->cipher_suites = mvm->ciphers;
454
Johannes Berg8ca151b2013-01-24 14:25:36 +0100455 /*
456 * Enable 11w if advertised by firmware and software crypto
457 * is not enabled (as the firmware will interpret some mgmt
458 * packets, so enabling it with software crypto isn't safe)
459 */
460 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
Johannes Berg5f4c02e2015-05-20 16:51:28 +0200461 !iwlwifi_mod_params.sw_crypto) {
Johannes Berg30686bf2015-06-02 21:39:54 +0200462 ieee80211_hw_set(hw, MFP_CAPABLE);
Johannes Berg5f4c02e2015-05-20 16:51:28 +0200463 mvm->ciphers[hw->wiphy->n_cipher_suites] =
464 WLAN_CIPHER_SUITE_AES_CMAC;
465 hw->wiphy->n_cipher_suites++;
466 }
467
468 /* currently FW API supports only one optional cipher scheme */
469 if (mvm->fw->cs[0].cipher) {
470 mvm->hw->n_cipher_schemes = 1;
471 mvm->hw->cipher_schemes = &mvm->fw->cs[0];
472 mvm->ciphers[hw->wiphy->n_cipher_suites] =
473 mvm->fw->cs[0].cipher;
474 hw->wiphy->n_cipher_suites++;
475 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100476
Johannes Berg30686bf2015-06-02 21:39:54 +0200477 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
Luciano Coelho1f940382015-02-10 13:03:38 +0200478 hw->wiphy->features |=
479 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
Johannes Berg3db93422015-05-06 14:56:51 +0200480 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
481 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
David Spinadelfb98be52014-05-04 12:51:10 +0300482
Johannes Berg8ca151b2013-01-24 14:25:36 +0100483 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
484 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
Ilan Peerfe0f2de2013-03-21 10:23:52 +0200485 hw->chanctx_data_size = sizeof(u16);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100486
487 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
Johannes Berg3c15a0f2013-05-31 10:17:19 +0200488 BIT(NL80211_IFTYPE_P2P_CLIENT) |
489 BIT(NL80211_IFTYPE_AP) |
490 BIT(NL80211_IFTYPE_P2P_GO) |
Emmanuel Grumbachc13b1722014-03-27 19:12:12 +0200491 BIT(NL80211_IFTYPE_P2P_DEVICE) |
492 BIT(NL80211_IFTYPE_ADHOC);
Johannes Berg5023d962013-07-31 14:07:43 +0200493
Luis R. Rodrigueza2f73b62013-11-11 22:15:29 +0100494 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
Eran Harary8ba2d7a2015-02-08 11:41:43 +0200495 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
496 if (iwl_mvm_is_lar_supported(mvm))
497 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
498 else
499 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
500 REGULATORY_DISABLE_BEACON_HINTS;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100501
Johannes Berg3e56ead2013-02-15 22:23:18 +0100502 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
503 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
504
Emmanuel Grumbach94bbed72014-11-24 08:53:33 +0200505 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +0200506
Johannes Berg8ca151b2013-01-24 14:25:36 +0100507 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
508 hw->wiphy->n_iface_combinations =
509 ARRAY_SIZE(iwl_mvm_iface_combinations);
510
Ilan Peerc451e6d2013-02-20 08:41:54 +0200511 hw->wiphy->max_remain_on_channel_duration = 10000;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100512 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
Emmanuel Grumbachf1a68542014-12-16 12:31:13 +0200513 /* we can compensate an offset of up to 3 channels = 15 MHz */
514 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100515
516 /* Extract MAC address */
517 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
518 hw->wiphy->addresses = mvm->addresses;
519 hw->wiphy->n_addresses = 1;
Ilan Peer831e85f2013-02-07 17:09:09 +0200520
521 /* Extract additional MAC addresses if available */
522 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
523 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
524
525 for (i = 1; i < num_mac; i++) {
526 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
Johannes Berg8ca151b2013-01-24 14:25:36 +0100527 ETH_ALEN);
Ilan Peer831e85f2013-02-07 17:09:09 +0200528 mvm->addresses[i].addr[5]++;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100529 hw->wiphy->n_addresses++;
530 }
531
Ilan Peerfe0f2de2013-03-21 10:23:52 +0200532 iwl_mvm_reset_phy_ctxts(mvm);
533
Luciano Coelho999d2562015-03-27 10:28:26 +0300534 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
David Spinadel20f1a5d2013-08-21 09:14:27 +0300535
Johannes Berg8ca151b2013-01-24 14:25:36 +0100536 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
537
Luciano Coelhoc7d42482015-05-07 16:00:26 +0300538 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
Luciano Coelho507e4cd2015-03-19 22:58:33 +0200539 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
540 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
541
Johannes Berg859d9142015-06-01 17:11:11 +0200542 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
Luciano Coelho507e4cd2015-03-19 22:58:33 +0200543 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
544 else
545 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
546
Johannes Berg8ca151b2013-01-24 14:25:36 +0100547 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
548 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
549 &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
Eyal Shapira3d44eeb2015-01-16 22:37:04 +0200550 if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
Johannes Berg8ca151b2013-01-24 14:25:36 +0100551 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
552 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
553
Johannes Berg859d9142015-06-01 17:11:11 +0200554 if (fw_has_capa(&mvm->fw->ucode_capa,
555 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
556 fw_has_api(&mvm->fw->ucode_capa,
557 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
Eyal Shapira3d44eeb2015-01-16 22:37:04 +0200558 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
559 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
560 }
561
Johannes Berg8ca151b2013-01-24 14:25:36 +0100562 hw->wiphy->hw_version = mvm->trans->hw_id;
563
Alexander Bondarade50652013-04-03 16:28:47 +0300564 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100565 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
566 else
567 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
568
Emmanuel Grumbach9954b372015-03-16 14:49:55 +0200569 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
570 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
571 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
572 /* we create the 802.11 header and zero length SSID IE. */
573 hw->wiphy->max_sched_scan_ie_len =
574 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
Avraham Sterncd55cce2015-08-19 12:46:12 +0300575 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
576 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
577
578 /*
579	 * the firmware uses a u8 for the number of iterations, but 0xff is
580	 * reserved for an infinite loop, so the maximum is actually 254.
581 */
582 hw->wiphy->max_sched_scan_plan_iterations = 254;
David Spinadel35a000b2013-08-28 09:29:43 +0300583
Johannes Berg8ca151b2013-01-24 14:25:36 +0100584 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
Johannes Bergab480032014-06-04 10:13:50 +0200585 NL80211_FEATURE_LOW_PRIORITY_SCAN |
Eliad Peller0d8614b2014-09-10 14:07:36 +0300586 NL80211_FEATURE_P2P_GO_OPPPS |
587 NL80211_FEATURE_DYNAMIC_SMPS |
Emmanuel Grumbach9b5452f2014-10-07 10:38:53 +0300588 NL80211_FEATURE_STATIC_SMPS |
589 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100590
Johannes Berg859d9142015-06-01 17:11:11 +0200591 if (fw_has_capa(&mvm->fw->ucode_capa,
592 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
Andrei Otcheretianskif1daa002014-07-01 12:54:25 +0300593 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
Johannes Berg859d9142015-06-01 17:11:11 +0200594 if (fw_has_capa(&mvm->fw->ucode_capa,
595 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
Assaf Krauss226bcd42014-03-13 08:12:15 +0200596 hw->wiphy->features |= NL80211_FEATURE_QUIET;
Andrei Otcheretianskif1daa002014-07-01 12:54:25 +0300597
Johannes Berg859d9142015-06-01 17:11:11 +0200598 if (fw_has_capa(&mvm->fw->ucode_capa,
599 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
Andrei Otcheretianski73897bd2014-07-09 18:59:14 +0300600 hw->wiphy->features |=
601 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
602
Johannes Berg859d9142015-06-01 17:11:11 +0200603 if (fw_has_capa(&mvm->fw->ucode_capa,
604 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
Andrei Otcheretianski73897bd2014-07-09 18:59:14 +0300605 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
606
Johannes Berg8ca151b2013-01-24 14:25:36 +0100607 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
608
609#ifdef CONFIG_PM_SLEEP
Eliad Pellerd15a7472014-03-27 18:53:12 +0200610 if (iwl_mvm_is_d0i3_supported(mvm) &&
611 device_can_wakeup(mvm->trans->dev)) {
612 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
613 hw->wiphy->wowlan = &mvm->wowlan;
Eliad Peller91742442014-12-09 15:54:46 +0200614 }
615
616 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
Johannes Berg8ca151b2013-01-24 14:25:36 +0100617 mvm->trans->ops->d3_suspend &&
618 mvm->trans->ops->d3_resume &&
619 device_can_wakeup(mvm->trans->dev)) {
Eliad Peller91742442014-12-09 15:54:46 +0200620 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
621 WIPHY_WOWLAN_DISCONNECT |
622 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
623 WIPHY_WOWLAN_RFKILL_RELEASE |
624 WIPHY_WOWLAN_NET_DETECT;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100625 if (!iwlwifi_mod_params.sw_crypto)
Johannes Berg964dc9e2013-06-03 17:25:34 +0200626 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
627 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
628 WIPHY_WOWLAN_4WAY_HANDSHAKE;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100629
Johannes Berg964dc9e2013-06-03 17:25:34 +0200630 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
631 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
632 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
Luciano Coelhoc55385f2014-10-24 10:39:51 +0300633 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
Johannes Berg964dc9e2013-06-03 17:25:34 +0200634 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
635 hw->wiphy->wowlan = &mvm->wowlan;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100636 }
637#endif
638
Eliad Peller77736922014-01-14 12:35:49 +0200639#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
640 /* assign default bcast filtering configuration */
641 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
642#endif
643
Johannes Berg8ca151b2013-01-24 14:25:36 +0100644 ret = iwl_mvm_leds_init(mvm);
645 if (ret)
646 return ret;
647
Johannes Berg859d9142015-06-01 17:11:11 +0200648 if (fw_has_capa(&mvm->fw->ucode_capa,
649 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
Arik Nemtsovd8f1c512014-05-11 18:13:04 +0300650 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
651 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
Arik Nemtsov7c4f0842015-04-30 18:31:45 +0300652 ieee80211_hw_set(hw, TDLS_WIDER_BW);
Arik Nemtsovd8f1c512014-05-11 18:13:04 +0300653 }
654
Johannes Berg859d9142015-06-01 17:11:11 +0200655 if (fw_has_capa(&mvm->fw->ucode_capa,
656 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +0300657 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
658 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
659 }
660
Avri Altman93190fb2014-12-27 09:09:47 +0200661 hw->netdev_features |= mvm->cfg->features;
662 if (!iwl_mvm_is_csum_supported(mvm))
663 hw->netdev_features &= ~NETIF_F_RXCSUM;
664
Emmanuel Grumbachb7327d82013-06-24 15:44:03 +0300665 ret = ieee80211_register_hw(mvm->hw);
666 if (ret)
667 iwl_mvm_leds_exit(mvm);
668
669 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100670}
671
Arik Nemtsovb2492502014-03-13 12:21:50 +0200672static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
673 struct ieee80211_sta *sta,
674 struct sk_buff *skb)
675{
676 struct iwl_mvm_sta *mvmsta;
677 bool defer = false;
678
679 /*
680 * double check the IN_D0I3 flag both before and after
681 * taking the spinlock, in order to prevent taking
682 * the spinlock when not needed.
683 */
684 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
685 return false;
686
687 spin_lock(&mvm->d0i3_tx_lock);
688 /*
689 * testing the flag again ensures the skb dequeue
690 * loop (on d0i3 exit) hasn't run yet.
691 */
692 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
693 goto out;
694
695 mvmsta = iwl_mvm_sta_from_mac80211(sta);
696 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
697 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
698 goto out;
699
700 __skb_queue_tail(&mvm->d0i3_tx, skb);
701 ieee80211_stop_queues(mvm->hw);
702
703 /* trigger wakeup */
704 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
705 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
706
707 defer = true;
708out:
709 spin_unlock(&mvm->d0i3_tx_lock);
710 return defer;
711}
712
Johannes Berg8ca151b2013-01-24 14:25:36 +0100713static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
714 struct ieee80211_tx_control *control,
715 struct sk_buff *skb)
716{
717 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Johannes Berg3e56ead2013-02-15 22:23:18 +0100718 struct ieee80211_sta *sta = control->sta;
719 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
720 struct ieee80211_hdr *hdr = (void *)skb->data;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100721
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +0300722 if (iwl_mvm_is_radio_killed(mvm)) {
723 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
Johannes Berg8ca151b2013-01-24 14:25:36 +0100724 goto drop;
725 }
726
Ilan Peer398e8c62013-03-13 15:20:35 +0200727 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
Matti Gottlieba6cc5162014-09-29 11:46:04 +0300728 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
729 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
Johannes Berg8ca151b2013-01-24 14:25:36 +0100730 goto drop;
731
Johannes Berg3e56ead2013-02-15 22:23:18 +0100732 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
733 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
734 ieee80211_is_mgmt(hdr->frame_control) &&
735 !ieee80211_is_deauth(hdr->frame_control) &&
736 !ieee80211_is_disassoc(hdr->frame_control) &&
737 !ieee80211_is_action(hdr->frame_control)))
738 sta = NULL;
739
740 if (sta) {
Arik Nemtsovb2492502014-03-13 12:21:50 +0200741 if (iwl_mvm_defer_tx(mvm, sta, skb))
742 return;
Johannes Berg3e56ead2013-02-15 22:23:18 +0100743 if (iwl_mvm_tx_skb(mvm, skb, sta))
Johannes Berg8ca151b2013-01-24 14:25:36 +0100744 goto drop;
745 return;
746 }
747
748 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
749 goto drop;
750 return;
751 drop:
752 ieee80211_free_txskb(hw, skb);
753}
754
Emmanuel Grumbach205e2212014-02-12 15:15:05 +0200755static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
756{
757 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
758 return false;
759 return true;
760}
761
762static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
763{
764 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
765 return false;
766 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
767 return true;
768
769 /* enabled by default */
770 return true;
771}
772
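/*
 * Fire the BA firmware-debug trigger only if the trigger's TID bitmap
 * includes the given TID; used by iwl_mvm_ampdu_check_trigger() below.
 */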
Emmanuel Grumbach42032632015-04-15 12:43:46 +0300773#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
774 do { \
775 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
776 break; \
777 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
778 } while (0)
779
780static void
781iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
782 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
783 enum ieee80211_ampdu_mlme_action action)
784{
785 struct iwl_fw_dbg_trigger_tlv *trig;
786 struct iwl_fw_dbg_trigger_ba *ba_trig;
787
788 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
789 return;
790
791 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
792 ba_trig = (void *)trig->data;
793
794 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
795 return;
796
797 switch (action) {
798 case IEEE80211_AMPDU_TX_OPERATIONAL: {
799 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
800 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
801
802 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
803 "TX AGG START: MAC %pM tid %d ssn %d\n",
804 sta->addr, tid, tid_data->ssn);
805 break;
806 }
807 case IEEE80211_AMPDU_TX_STOP_CONT:
808 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
809 "TX AGG STOP: MAC %pM tid %d\n",
810 sta->addr, tid);
811 break;
812 case IEEE80211_AMPDU_RX_START:
813 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
814 "RX AGG START: MAC %pM tid %d ssn %d\n",
815 sta->addr, tid, rx_ba_ssn);
816 break;
817 case IEEE80211_AMPDU_RX_STOP:
818 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
819 "RX AGG STOP: MAC %pM tid %d\n",
820 sta->addr, tid);
821 break;
822 default:
823 break;
824 }
825}
826
Johannes Berg8ca151b2013-01-24 14:25:36 +0100827static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
828 struct ieee80211_vif *vif,
829 enum ieee80211_ampdu_mlme_action action,
830 struct ieee80211_sta *sta, u16 tid,
Emmanuel Grumbache3abc8f2015-08-16 11:13:22 +0300831 u16 *ssn, u8 buf_size, bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100832{
833 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
834 int ret;
Arik Nemtsovb2492502014-03-13 12:21:50 +0200835 bool tx_agg_ref = false;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100836
837 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
838 sta->addr, tid, action);
839
840 if (!(mvm->nvm_data->sku_cap_11n_enable))
841 return -EACCES;
842
Arik Nemtsovb2492502014-03-13 12:21:50 +0200843 /* return from D0i3 before starting a new Tx aggregation */
Eliad Peller9256c202014-04-22 13:33:29 +0300844 switch (action) {
845 case IEEE80211_AMPDU_TX_START:
846 case IEEE80211_AMPDU_TX_STOP_CONT:
847 case IEEE80211_AMPDU_TX_STOP_FLUSH:
848 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
849 case IEEE80211_AMPDU_TX_OPERATIONAL:
Arik Nemtsovb2492502014-03-13 12:21:50 +0200850 /*
Eliad Peller9256c202014-04-22 13:33:29 +0300851 * for tx start, wait synchronously until D0i3 exit to
852 * get the correct sequence number for the tid.
853 * additionally, some other ampdu actions use direct
854 * target access, which is not handled automatically
855 * by the trans layer (unlike commands), so wait for
856 * d0i3 exit in these cases as well.
Arik Nemtsovb2492502014-03-13 12:21:50 +0200857 */
Gregory Greenmand40fc482014-06-25 14:08:50 +0200858 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
859 if (ret)
860 return ret;
861
862 tx_agg_ref = true;
Eliad Peller9256c202014-04-22 13:33:29 +0300863 break;
864 default:
865 break;
Arik Nemtsovb2492502014-03-13 12:21:50 +0200866 }
867
Johannes Berg8ca151b2013-01-24 14:25:36 +0100868 mutex_lock(&mvm->mutex);
869
870 switch (action) {
871 case IEEE80211_AMPDU_RX_START:
Emmanuel Grumbach205e2212014-02-12 15:15:05 +0200872 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
Johannes Berg8ca151b2013-01-24 14:25:36 +0100873 ret = -EINVAL;
874 break;
875 }
876 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
877 break;
878 case IEEE80211_AMPDU_RX_STOP:
879 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
880 break;
881 case IEEE80211_AMPDU_TX_START:
Emmanuel Grumbach205e2212014-02-12 15:15:05 +0200882 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
Emmanuel Grumbach5d158ef2013-02-19 14:39:58 +0200883 ret = -EINVAL;
884 break;
885 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100886 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
887 break;
888 case IEEE80211_AMPDU_TX_STOP_CONT:
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +0200889 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
890 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100891 case IEEE80211_AMPDU_TX_STOP_FLUSH:
892 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +0200893 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100894 break;
895 case IEEE80211_AMPDU_TX_OPERATIONAL:
896 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
897 break;
898 default:
899 WARN_ON_ONCE(1);
900 ret = -EINVAL;
901 break;
902 }
Emmanuel Grumbach42032632015-04-15 12:43:46 +0300903
904 if (!ret) {
905 u16 rx_ba_ssn = 0;
906
907 if (action == IEEE80211_AMPDU_RX_START)
908 rx_ba_ssn = *ssn;
909
910 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
911 rx_ba_ssn, action);
912 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100913 mutex_unlock(&mvm->mutex);
914
Arik Nemtsovb2492502014-03-13 12:21:50 +0200915 /*
916 * If the tid is marked as started, we won't use it for offloaded
917 * traffic on the next D0i3 entry. It's safe to unref.
918 */
919 if (tx_agg_ref)
920 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
921
Johannes Berg8ca151b2013-01-24 14:25:36 +0100922 return ret;
923}
924
925static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
926 struct ieee80211_vif *vif)
927{
928 struct iwl_mvm *mvm = data;
929 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
930
931 mvmvif->uploaded = false;
932 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
933
Johannes Berg8ca151b2013-01-24 14:25:36 +0100934 spin_lock_bh(&mvm->time_event_lock);
935 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
936 spin_unlock_bh(&mvm->time_event_lock);
937
Ilan Peerfe0f2de2013-03-21 10:23:52 +0200938 mvmvif->phy_ctxt = NULL;
Emmanuel Grumbach8a275ba2014-07-13 09:12:11 +0300939 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
Johannes Berg8ca151b2013-01-24 14:25:36 +0100940}
941
Johannes Bergaadede62014-10-09 17:01:36 +0200942static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
943 const void *data, size_t datalen)
944{
945 const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
946 ssize_t bytes_read;
947 ssize_t bytes_read_trans;
948
949 if (offset < dump_ptrs->op_mode_len) {
950 bytes_read = min_t(ssize_t, count,
951 dump_ptrs->op_mode_len - offset);
952 memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
953 bytes_read);
954 offset += bytes_read;
955 count -= bytes_read;
956
957 if (count == 0)
958 return bytes_read;
959 } else {
960 bytes_read = 0;
961 }
962
963 if (!dump_ptrs->trans_ptr)
964 return bytes_read;
965
966 offset -= dump_ptrs->op_mode_len;
967 bytes_read_trans = min_t(ssize_t, count,
968 dump_ptrs->trans_ptr->len - offset);
969 memcpy(buffer + bytes_read,
970 (u8 *)dump_ptrs->trans_ptr->data + offset,
971 bytes_read_trans);
972
973 return bytes_read + bytes_read_trans;
974}
975
976static void iwl_mvm_free_coredump(const void *data)
977{
978 const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
979
980 vfree(fw_error_dump->op_mode_ptr);
981 vfree(fw_error_dump->trans_ptr);
982 kfree(fw_error_dump);
983}
984
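/*
 * Read back the RX and TX FIFO contents through the periphery registers
 * and add them as TLVs to the firmware error dump.
 */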
Liad Kaufman04fd2c22014-12-15 17:54:16 +0200985static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
986 struct iwl_fw_error_dump_data **dump_data)
987{
988 struct iwl_fw_error_dump_fifo *fifo_hdr;
989 u32 *fifo_data;
990 u32 fifo_len;
991 unsigned long flags;
992 int i, j;
993
994 if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
995 return;
996
997 /* Pull RXF data from all RXFs */
998 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
999 /*
1000 * Keep aside the additional offset that might be needed for
1001		 * the next RXF
1002 */
1003 u32 offset_diff = RXF_DIFF_FROM_PREV * i;
1004
1005 fifo_hdr = (void *)(*dump_data)->data;
1006 fifo_data = (void *)fifo_hdr->data;
1007 fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
1008
1009 /* No need to try to read the data if the length is 0 */
1010 if (fifo_len == 0)
1011 continue;
1012
1013 /* Add a TLV for the RXF */
1014 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
1015 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1016
1017 fifo_hdr->fifo_num = cpu_to_le32(i);
1018 fifo_hdr->available_bytes =
1019 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1020 RXF_RD_D_SPACE +
1021 offset_diff));
1022 fifo_hdr->wr_ptr =
1023 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1024 RXF_RD_WR_PTR +
1025 offset_diff));
1026 fifo_hdr->rd_ptr =
1027 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1028 RXF_RD_RD_PTR +
1029 offset_diff));
1030 fifo_hdr->fence_ptr =
1031 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1032 RXF_RD_FENCE_PTR +
1033 offset_diff));
1034 fifo_hdr->fence_mode =
1035 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1036 RXF_SET_FENCE_MODE +
1037 offset_diff));
1038
1039 /* Lock fence */
1040 iwl_trans_write_prph(mvm->trans,
1041 RXF_SET_FENCE_MODE + offset_diff, 0x1);
1042 /* Set fence pointer to the same place like WR pointer */
1043 iwl_trans_write_prph(mvm->trans,
1044 RXF_LD_WR2FENCE + offset_diff, 0x1);
1045 /* Set fence offset */
1046 iwl_trans_write_prph(mvm->trans,
1047 RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
1048 0x0);
1049
1050 /* Read FIFO */
1051 fifo_len /= sizeof(u32); /* Size in DWORDS */
1052 for (j = 0; j < fifo_len; j++)
1053 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1054 RXF_FIFO_RD_FENCE_INC +
1055 offset_diff);
1056 *dump_data = iwl_fw_error_next_data(*dump_data);
1057 }
1058
1059 /* Pull TXF data from all TXFs */
1060 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
1061 /* Mark the number of TXF we're pulling now */
1062 iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
1063
1064 fifo_hdr = (void *)(*dump_data)->data;
1065 fifo_data = (void *)fifo_hdr->data;
1066 fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
1067
1068 /* No need to try to read the data if the length is 0 */
1069 if (fifo_len == 0)
1070 continue;
1071
1072 /* Add a TLV for the FIFO */
1073 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
1074 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1075
1076 fifo_hdr->fifo_num = cpu_to_le32(i);
1077 fifo_hdr->available_bytes =
1078 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1079 TXF_FIFO_ITEM_CNT));
1080 fifo_hdr->wr_ptr =
1081 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1082 TXF_WR_PTR));
1083 fifo_hdr->rd_ptr =
1084 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1085 TXF_RD_PTR));
1086 fifo_hdr->fence_ptr =
1087 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1088 TXF_FENCE_PTR));
1089 fifo_hdr->fence_mode =
1090 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1091 TXF_LOCK_FENCE));
1092
1093 /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
1094 iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
1095 TXF_WR_PTR);
1096
1097 /* Dummy-read to advance the read pointer to the head */
1098 iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
1099
1100 /* Read FIFO */
1101 fifo_len /= sizeof(u32); /* Size in DWORDS */
1102 for (j = 0; j < fifo_len; j++)
1103 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1104 TXF_READ_MODIFY_DATA);
1105 *dump_data = iwl_fw_error_next_data(*dump_data);
1106 }
1107
1108 iwl_trans_release_nic_access(mvm->trans, &flags);
1109}
1110
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001111void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
1112{
1113 if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
1114 !mvm->fw_dump_desc)
1115 return;
1116
1117 kfree(mvm->fw_dump_desc);
1118 mvm->fw_dump_desc = NULL;
1119}
1120
Liad Kaufmane5397612015-03-02 11:46:46 +02001121#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
1122#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
1123
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +03001124void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001125{
1126 struct iwl_fw_error_dump_file *dump_file;
1127 struct iwl_fw_error_dump_data *dump_data;
1128 struct iwl_fw_error_dump_info *dump_info;
Emmanuel Grumbacha549b292014-12-09 14:47:57 +02001129 struct iwl_fw_error_dump_mem *dump_mem;
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001130 struct iwl_fw_error_dump_trigger_desc *dump_trig;
Emmanuel Grumbach48eb7b32014-07-08 19:45:17 +03001131 struct iwl_mvm_dump_ptrs *fw_error_dump;
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001132 u32 sram_len, sram_ofs;
Liad Kaufman04fd2c22014-12-15 17:54:16 +02001133 u32 file_len, fifo_data_len = 0;
Liad Kaufmanaddfaad2014-12-02 11:16:04 +02001134 u32 smem_len = mvm->cfg->smem_len;
Ido Yariv86138322014-12-10 12:39:27 -05001135 u32 sram2_len = mvm->cfg->dccm2_len;
Oren Givon36fb9012015-07-15 15:47:28 +03001136 bool monitor_dump_only = false;
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001137
1138 lockdep_assert_held(&mvm->mutex);
1139
Eliad Peller053225d2015-09-10 15:00:22 +03001140 /* there's no point in fw dump if the bus is dead */
1141 if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
1142 IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
1143 return;
1144 }
1145
Oren Givon36fb9012015-07-15 15:47:28 +03001146 if (mvm->fw_dump_trig &&
1147 mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
1148 monitor_dump_only = true;
1149
Johannes Bergaadede62014-10-09 17:01:36 +02001150 fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
Emmanuel Grumbach48eb7b32014-07-08 19:45:17 +03001151 if (!fw_error_dump)
1152 return;
1153
Liad Kaufmanf53bf4c2014-12-01 10:44:18 +02001154 /* SRAM - include stack CCM if driver knows the values for it */
1155 if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
1156 const struct fw_img *img;
1157
1158 img = &mvm->fw->img[mvm->cur_ucode];
1159 sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1160 sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
1161 } else {
1162 sram_ofs = mvm->cfg->dccm_offset;
1163 sram_len = mvm->cfg->dccm_len;
1164 }
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001165
Liad Kaufman04fd2c22014-12-15 17:54:16 +02001166 /* reading RXF/TXF sizes */
1167 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
1168 struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
1169 int i;
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001170
Liad Kaufman04fd2c22014-12-15 17:54:16 +02001171 fifo_data_len = 0;
1172
1173 /* Count RXF size */
1174 for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
1175 if (!mem_cfg->rxfifo_size[i])
1176 continue;
1177
1178 /* Add header info */
1179 fifo_data_len += mem_cfg->rxfifo_size[i] +
1180 sizeof(*dump_data) +
1181 sizeof(struct iwl_fw_error_dump_fifo);
1182 }
1183
1184 for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
1185 if (!mem_cfg->txfifo_size[i])
1186 continue;
1187
1188 /* Add header info */
1189 fifo_data_len += mem_cfg->txfifo_size[i] +
1190 sizeof(*dump_data) +
1191 sizeof(struct iwl_fw_error_dump_fifo);
1192 }
1193 }
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001194
1195 file_len = sizeof(*dump_file) +
Liad Kaufman04fd2c22014-12-15 17:54:16 +02001196 sizeof(*dump_data) * 2 +
Emmanuel Grumbacha549b292014-12-09 14:47:57 +02001197 sram_len + sizeof(*dump_mem) +
Liad Kaufman04fd2c22014-12-15 17:54:16 +02001198 fifo_data_len +
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001199 sizeof(*dump_info);
1200
Oren Givon36fb9012015-07-15 15:47:28 +03001201 /* Make room for the SMEM, if it exists */
1202 if (smem_len)
1203 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
1204
1205 /* Make room for the secondary SRAM, if it exists */
1206 if (sram2_len)
1207 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
1208
Matti Gottliebf0afea52015-07-20 17:55:51 +03001209	/* Make room for fw's virtual image pages, if they exist */
1210 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
1211 file_len += mvm->num_of_paging_blk *
1212 (sizeof(*dump_data) +
1213 sizeof(struct iwl_fw_error_dump_paging) +
1214 PAGING_BLOCK_SIZE);
1215
Oren Givon36fb9012015-07-15 15:47:28 +03001216 /* If we only want a monitor dump, reset the file length */
1217 if (monitor_dump_only) {
1218 file_len = sizeof(*dump_file) + sizeof(*dump_data) +
1219 sizeof(*dump_info);
1220 }
1221
Liad Kaufmane5397612015-03-02 11:46:46 +02001222 /*
1223	 * In the 8000 HW family B-step, include the ICCM (which resides separately)
1224 */
1225 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1226 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
1227 file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
1228 IWL8260_ICCM_LEN;
1229
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001230 if (mvm->fw_dump_desc)
1231 file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
1232 mvm->fw_dump_desc->len;
1233
Emmanuel Grumbach5bfe6f52014-06-25 16:21:43 +03001234 dump_file = vzalloc(file_len);
Emmanuel Grumbach48eb7b32014-07-08 19:45:17 +03001235 if (!dump_file) {
1236 kfree(fw_error_dump);
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001237 iwl_mvm_free_fw_dump_desc(mvm);
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001238 return;
Emmanuel Grumbach48eb7b32014-07-08 19:45:17 +03001239 }
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001240
Emmanuel Grumbach48eb7b32014-07-08 19:45:17 +03001241 fw_error_dump->op_mode_ptr = dump_file;
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001242
1243 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001244 dump_data = (void *)dump_file->data;
1245
1246 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
1247 dump_data->len = cpu_to_le32(sizeof(*dump_info));
1248 dump_info = (void *) dump_data->data;
1249 dump_info->device_family =
1250 mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
1251 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
1252 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
Ido Yariv435da2c2014-12-11 16:11:01 -05001253 dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001254 memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
1255 sizeof(dump_info->fw_human_readable));
1256 strncpy(dump_info->dev_human_readable, mvm->cfg->name,
1257 sizeof(dump_info->dev_human_readable));
1258 strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
1259 sizeof(dump_info->bus_human_readable));
1260
1261 dump_data = iwl_fw_error_next_data(dump_data);
Liad Kaufman04fd2c22014-12-15 17:54:16 +02001262 /* We only dump the FIFOs if the FW is in error state */
1263 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
1264 iwl_mvm_dump_fifos(mvm, &dump_data);
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001265
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001266 if (mvm->fw_dump_desc) {
1267 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
1268 dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
1269 mvm->fw_dump_desc->len);
1270 dump_trig = (void *)dump_data->data;
1271 memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
1272 sizeof(*dump_trig) + mvm->fw_dump_desc->len);
1273
1274 /* now we can free this copy */
1275 iwl_mvm_free_fw_dump_desc(mvm);
1276 dump_data = iwl_fw_error_next_data(dump_data);
1277 }
1278
Oren Givon36fb9012015-07-15 15:47:28 +03001279	/* In case we only want a monitor dump, skip to dumping transport data */
1280 if (monitor_dump_only)
1281 goto dump_trans_data;
1282
Emmanuel Grumbacha549b292014-12-09 14:47:57 +02001283 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1284 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
1285 dump_mem = (void *)dump_data->data;
1286 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1287 dump_mem->offset = cpu_to_le32(sram_ofs);
1288 iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001289 sram_len);
1290
Liad Kaufmanaddfaad2014-12-02 11:16:04 +02001291 if (smem_len) {
1292 dump_data = iwl_fw_error_next_data(dump_data);
Emmanuel Grumbache06d8432014-12-09 14:36:41 +02001293 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1294 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
1295 dump_mem = (void *)dump_data->data;
1296 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
1297 dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
Liad Kaufmanaddfaad2014-12-02 11:16:04 +02001298 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
Emmanuel Grumbache06d8432014-12-09 14:36:41 +02001299 dump_mem->data, smem_len);
Liad Kaufmanaddfaad2014-12-02 11:16:04 +02001300 }
1301
Ido Yariv86138322014-12-10 12:39:27 -05001302 if (sram2_len) {
1303 dump_data = iwl_fw_error_next_data(dump_data);
1304 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1305 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
1306 dump_mem = (void *)dump_data->data;
1307 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1308 dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
1309 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
1310 dump_mem->data, sram2_len);
1311 }
1312
Liad Kaufmane5397612015-03-02 11:46:46 +02001313 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1314 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
1315 dump_data = iwl_fw_error_next_data(dump_data);
1316 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1317 dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
1318 sizeof(*dump_mem));
1319 dump_mem = (void *)dump_data->data;
1320 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1321 dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
1322 iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
1323 dump_mem->data, IWL8260_ICCM_LEN);
1324 }
1325
Matti Gottliebf0afea52015-07-20 17:55:51 +03001326 /* Dump fw's virtual image */
1327 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
1328 u32 i;
1329
1330 for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
1331 struct iwl_fw_error_dump_paging *paging;
1332 struct page *pages =
1333 mvm->fw_paging_db[i].fw_paging_block;
1334
1335 dump_data = iwl_fw_error_next_data(dump_data);
1336 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
1337 dump_data->len = cpu_to_le32(sizeof(*paging) +
1338 PAGING_BLOCK_SIZE);
1339 paging = (void *)dump_data->data;
1340 paging->index = cpu_to_le32(i);
1341 memcpy(paging->data, page_address(pages),
1342 PAGING_BLOCK_SIZE);
1343 }
1344 }
1345
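	/*
	 * The transport layer appends its own dump (e.g. FW monitor data);
	 * its size is only known once iwl_trans_dump_data() returns, so
	 * file_len is finalized only after that call.
	 */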
Oren Givon36fb9012015-07-15 15:47:28 +03001346dump_trans_data:
1347 fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
1348 mvm->fw_dump_trig);
Emmanuel Grumbach48eb7b32014-07-08 19:45:17 +03001349 fw_error_dump->op_mode_len = file_len;
1350 if (fw_error_dump->trans_ptr)
1351 file_len += fw_error_dump->trans_ptr->len;
1352 dump_file->file_len = cpu_to_le32(file_len);
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +03001353
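	/*
	 * Hand everything to the devcoredump framework;
	 * iwl_mvm_read_coredump() serves the data to userspace and
	 * iwl_mvm_free_coredump() releases fw_error_dump (including the
	 * transport dump) once userspace is done with it.
	 */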
Johannes Bergaadede62014-10-09 17:01:36 +02001354 dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
1355 GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
Emmanuel Grumbachd2709ad2015-01-29 14:58:06 +02001356
Oren Givon36fb9012015-07-15 15:47:28 +03001357 mvm->fw_dump_trig = NULL;
Emmanuel Grumbachd2709ad2015-01-29 14:58:06 +02001358 clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001359}
Emmanuel Grumbach655e6d62014-06-25 14:08:58 +03001360
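/*
 * Canned dump descriptor used when the firmware asserts (and during restart
 * cleanup below): it carries only the FW_DBG_TRIGGER_FW_ASSERT trigger type
 * and no extra payload.
 */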
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001361struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
1362 .trig_desc = {
1363 .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1364 },
1365};
1366
Johannes Berg8ca151b2013-01-24 14:25:36 +01001367static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1368{
Johannes Berg58629d92014-11-06 09:40:50 +01001369	/* clear the D3 reconfig flag; we only need it to avoid dumping a
1370	 * firmware coredump on reconfiguration, and we shouldn't do that
1371	 * on the D3->D0 transition
1372 */
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001373 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1374 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
Johannes Berg58629d92014-11-06 09:40:50 +01001375 iwl_mvm_fw_error_dump(mvm);
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001376 }
Emmanuel Grumbach1bd3cbc2014-03-18 21:15:06 +02001377
Eliad Peller744cb692014-12-10 18:44:13 +02001378 /* cleanup all stale references (scan, roc), but keep the
1379 * ucode_down ref until reconfig is complete
1380 */
1381 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1382
Johannes Berg8ca151b2013-01-24 14:25:36 +01001383 iwl_trans_stop_device(mvm->trans);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001384
Luciano Coelho9af91f42015-02-10 10:42:26 +02001385 mvm->scan_status = 0;
Luciano Coelhob1873302014-08-08 17:12:07 +03001386 mvm->ps_disabled = false;
Emmanuel Grumbach31b8b342014-11-02 15:48:09 +02001387 mvm->calibrating = false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001388
1389 /* just in case one was running */
1390 ieee80211_remain_on_channel_expired(mvm->hw);
1391
Arik Nemtsov737719f2015-02-23 14:42:41 +02001392 /*
1393 * cleanup all interfaces, even inactive ones, as some might have
1394 * gone down during the HW restart
1395 */
1396 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001397
Ilan Peerfe0f2de2013-03-21 10:23:52 +02001398 mvm->p2p_device_vif = NULL;
Eliad Peller37577fe2013-12-05 17:19:39 +02001399 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
Ilan Peerfe0f2de2013-03-21 10:23:52 +02001400
1401 iwl_mvm_reset_phy_ctxts(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001402 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001403 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
Emmanuel Grumbach8a275ba2014-07-13 09:12:11 +03001404 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1405 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
1406 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1407 memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
1408 memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
1409 memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001410
1411 ieee80211_wake_queues(mvm->hw);
1412
Eliad Peller228670b2014-08-10 17:00:15 +03001413 /* clear any stale d0i3 state */
1414 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1415
Johannes Berg8ca151b2013-01-24 14:25:36 +01001416 mvm->vif_count = 0;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03001417 mvm->rx_ba_sessions = 0;
Emmanuel Grumbachd2709ad2015-01-29 14:58:06 +02001418 mvm->fw_dbg_conf = FW_DBG_INVALID;
Johannes Berg91a8bcd2015-01-14 18:12:41 +01001419
1420 /* keep statistics ticking */
1421 iwl_mvm_accu_radio_stats(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001422}
1423
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001424int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001425{
Johannes Berg8ca151b2013-01-24 14:25:36 +01001426 int ret;
1427
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001428 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001429
1430 /* Clean up some internal and mac80211 state on restart */
1431 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1432 iwl_mvm_restart_cleanup(mvm);
1433
1434 ret = iwl_mvm_up(mvm);
Johannes Bergc47af222014-04-30 16:34:45 +02001435
1436 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1437 /* Something went wrong - we need to finish some cleanup
1438	 * that normally iwl_mvm_restart_complete() below
1439 * would do.
1440 */
1441 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1442 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1443 }
1444
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001445 return ret;
1446}
1447
1448static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1449{
1450 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1451 int ret;
1452
Eliad Peller37948fc2014-12-11 10:48:18 +02001453 /* Some hw restart cleanups must not hold the mutex */
1454 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1455 /*
1456 * Make sure we are out of d0i3. This is needed
1457 * to make sure the reference accounting is correct
1458 * (and there is no stale d0i3_exit_work).
1459 */
1460 wait_event_timeout(mvm->d0i3_exit_waitq,
1461 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1462 &mvm->status),
1463 HZ);
1464 }
1465
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001466 mutex_lock(&mvm->mutex);
1467 ret = __iwl_mvm_mac_start(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001468 mutex_unlock(&mvm->mutex);
1469
1470 return ret;
1471}
1472
Eliad Pellercf2c92d2014-11-04 11:43:54 +02001473static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001474{
Johannes Berg8ca151b2013-01-24 14:25:36 +01001475 int ret;
1476
1477 mutex_lock(&mvm->mutex);
1478
1479 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
Arik Nemtsovb2492502014-03-13 12:21:50 +02001480 iwl_mvm_d0i3_enable_tx(mvm, NULL);
Johannes Berge7afe892015-04-21 09:21:46 +02001481 ret = iwl_mvm_update_quotas(mvm, true, NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001482 if (ret)
1483 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1484 ret);
1485
Eliad Peller7498cf42014-01-16 17:10:44 +02001486 /* allow transport/FW low power modes */
1487 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1488
Arik Nemtsovcbd2ae22014-09-14 19:13:54 +03001489 /*
1490 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1491 * of packets the FW sent out, so we must reconnect.
1492 */
1493 iwl_mvm_teardown_tdls_peers(mvm);
1494
Johannes Berg8ca151b2013-01-24 14:25:36 +01001495 mutex_unlock(&mvm->mutex);
1496}
1497
Eliad Peller088070a2014-10-20 18:42:58 +03001498static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1499{
Eliad Peller088070a2014-10-20 18:42:58 +03001500 if (!iwl_mvm_is_d0i3_supported(mvm))
1501 return;
1502
Eliad Peller67359432014-12-09 15:23:54 +02001503 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1504 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1505 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1506 &mvm->status),
1507 HZ))
1508 WARN_ONCE(1, "D0i3 exit on resume timed out\n");
Eliad Peller088070a2014-10-20 18:42:58 +03001509}
1510
Eliad Pellercf2c92d2014-11-04 11:43:54 +02001511static void
1512iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1513 enum ieee80211_reconfig_type reconfig_type)
1514{
1515 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1516
1517 switch (reconfig_type) {
1518 case IEEE80211_RECONFIG_TYPE_RESTART:
1519 iwl_mvm_restart_complete(mvm);
1520 break;
1521 case IEEE80211_RECONFIG_TYPE_SUSPEND:
Eliad Peller088070a2014-10-20 18:42:58 +03001522 iwl_mvm_resume_complete(mvm);
Eliad Pellercf2c92d2014-11-04 11:43:54 +02001523 break;
1524 }
1525}
1526
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001527void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001528{
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001529 lockdep_assert_held(&mvm->mutex);
Eliad Peller7498cf42014-01-16 17:10:44 +02001530
Johannes Berg91a8bcd2015-01-14 18:12:41 +01001531	/* firmware counters are obviously reset now; don't keep a partial
1532	 * accumulation either, so also clear the accumulated radio statistics.
1533 */
1534 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1535
Eliad Peller0a79a0c02014-12-04 10:27:20 +02001536 /*
1537 * Disallow low power states when the FW is down by taking
1538 * the UCODE_DOWN ref. in case of ongoing hw restart the
1539 * ref is already taken, so don't take it again.
1540 */
1541 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1542 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
Eliad Peller7498cf42014-01-16 17:10:44 +02001543
Johannes Berg8ca151b2013-01-24 14:25:36 +01001544 /* async_handlers_wk is now blocked */
1545
1546 /*
1547 * The work item could be running or queued if the
1548 * ROC time event stops just as we get here.
1549 */
Eliad Pellerc7792732015-04-19 11:41:04 +03001550 flush_work(&mvm->roc_done_wk);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001551
1552 iwl_trans_stop_device(mvm->trans);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001553
1554 iwl_mvm_async_handlers_purge(mvm);
1555 /* async_handlers_list is empty and will stay empty: HW is stopped */
1556
1557 /* the fw is stopped, the aux sta is dead: clean up driver state */
Johannes Berg712b24a2014-08-04 14:14:14 +02001558 iwl_mvm_del_aux_sta(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001559
Eliad Peller0a79a0c02014-12-04 10:27:20 +02001560 /*
1561 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1562 * won't be called in this case).
Arik Nemtsov8b2b9fb2015-03-04 13:16:03 +02001563	 * But make sure to clean up interfaces that went down before or while
1564	 * the HW restart was requested.
Eliad Peller0a79a0c02014-12-04 10:27:20 +02001565 */
Arik Nemtsov8b2b9fb2015-03-04 13:16:03 +02001566 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1567 ieee80211_iterate_interfaces(mvm->hw, 0,
1568 iwl_mvm_cleanup_iterator, mvm);
Eliad Peller0a79a0c02014-12-04 10:27:20 +02001569
Alexander Bondar963221b2015-03-26 11:07:35 +02001570 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1571 * make sure there's nothing left there and warn if any is found.
1572 */
Johannes Berg859d9142015-06-01 17:11:11 +02001573 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
Alexander Bondar963221b2015-03-26 11:07:35 +02001574 int i;
1575
Luciano Coelho507e4cd2015-03-19 22:58:33 +02001576 for (i = 0; i < mvm->max_scans; i++) {
Luciano Coelho6185af2a2015-05-07 11:13:24 +03001577 if (WARN_ONCE(mvm->scan_uid_status[i],
1578 "UMAC scan UID %d status was not cleaned\n",
1579 i))
1580 mvm->scan_uid_status[i] = 0;
Alexander Bondar963221b2015-03-26 11:07:35 +02001581 }
1582 }
1583
Luciano Coelhobc448862014-08-20 11:49:11 +03001584 mvm->ucode_loaded = false;
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001585}
Luciano Coelhobc448862014-08-20 11:49:11 +03001586
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001587static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1588{
1589 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1590
1591 flush_work(&mvm->d0i3_exit_work);
1592 flush_work(&mvm->async_handlers_wk);
Emmanuel Grumbachd2709ad2015-01-29 14:58:06 +02001593 cancel_delayed_work_sync(&mvm->fw_dump_wk);
Emmanuel Grumbachb6eaa452015-01-29 14:58:20 +02001594 iwl_mvm_free_fw_dump_desc(mvm);
Luciano Coelhoa0a092432014-09-04 12:29:15 +03001595
1596 mutex_lock(&mvm->mutex);
1597 __iwl_mvm_mac_stop(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001598 mutex_unlock(&mvm->mutex);
1599
1600 /*
1601 * The worker might have been waiting for the mutex, let it run and
1602 * discover that its list is now empty.
1603 */
1604 cancel_work_sync(&mvm->async_handlers_wk);
1605}
1606
Ilan Peerfe0f2de2013-03-21 10:23:52 +02001607static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1608{
1609 u16 i;
1610
1611 lockdep_assert_held(&mvm->mutex);
1612
1613 for (i = 0; i < NUM_PHY_CTX; i++)
1614 if (!mvm->phy_ctxts[i].ref)
1615 return &mvm->phy_ctxts[i];
1616
1617 IWL_ERR(mvm, "No available PHY context\n");
1618 return NULL;
1619}
1620
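/*
 * Set a per-MAC TX power limit.  mac80211 hands us tx_power in dBm while the
 * firmware expects the restriction in 1/8 dBm units, hence the 8 * tx_power
 * conversion (e.g. 22 dBm -> 176).  Firmware without the TX_POWER_CHAIN API
 * only understands the v2 layout, so only sizeof(cmd.v2) bytes are sent then.
 */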
Avri Altmand44c3fe2015-04-18 22:16:42 +03001621static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1622 s16 tx_power)
1623{
1624 struct iwl_dev_tx_power_cmd cmd = {
Johannes Bergda03f022015-08-18 15:15:36 +02001625 .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1626 .v2.mac_context_id =
Avri Altmand44c3fe2015-04-18 22:16:42 +03001627 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
Johannes Bergda03f022015-08-18 15:15:36 +02001628 .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
Avri Altmand44c3fe2015-04-18 22:16:42 +03001629 };
Johannes Bergda03f022015-08-18 15:15:36 +02001630 int len = sizeof(cmd);
Avri Altmand44c3fe2015-04-18 22:16:42 +03001631
Avri Altmand44c3fe2015-04-18 22:16:42 +03001632 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
Johannes Bergda03f022015-08-18 15:15:36 +02001633 cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
Avri Altmand44c3fe2015-04-18 22:16:42 +03001634
Johannes Bergda03f022015-08-18 15:15:36 +02001635 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1636 len = sizeof(cmd.v2);
1637
1638 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
Avri Altmand44c3fe2015-04-18 22:16:42 +03001639}
1640
Johannes Berg8ca151b2013-01-24 14:25:36 +01001641static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1642 struct ieee80211_vif *vif)
1643{
1644 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1645 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1646 int ret;
1647
Emmanuel Grumbachaa5e1832015-03-07 19:35:37 +02001648 mvmvif->mvm = mvm;
1649
Johannes Berg8ca151b2013-01-24 14:25:36 +01001650 /*
Gregory Greenmand40fc482014-06-25 14:08:50 +02001651	 * make sure D0i3 exit is completed; otherwise a target access
1652	 * during tx queue configuration could happen while still in
1653	 * D0i3 state.
1654 */
1655 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1656 if (ret)
1657 return ret;
1658
1659 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001660 * Not much to do here. The stack will not allow interface
1661 * types or combinations that we didn't advertise, so we
1662 * don't really have to check the types.
1663 */
1664
1665 mutex_lock(&mvm->mutex);
1666
Johannes Berg33cef922015-01-21 21:41:29 +01001667 /* make sure that beacon statistics don't go backwards with FW reset */
1668 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1669 mvmvif->beacon_stats.accu_num_beacons +=
1670 mvmvif->beacon_stats.num_beacons;
1671
Eliad Pellere89044d2013-07-16 17:33:26 +03001672 /* Allocate resources for the MAC context, and add it to the fw */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001673 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1674 if (ret)
1675 goto out_unlock;
1676
Alexander Bondar1c2abf72013-08-27 20:31:48 +03001677 /* Counting number of interfaces is needed for legacy PM */
Ilan Peerea183d02013-07-23 14:41:53 +03001678 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1679 mvm->vif_count++;
Ilan Peerea183d02013-07-23 14:41:53 +03001680
1681 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001682 * The AP binding flow can be done only after the beacon
1683 * template is configured (which happens only in the mac80211
1684 * start_ap() flow), and adding the broadcast station can happen
1685 * only after the binding.
1686 * In addition, since modifying the MAC before adding a bcast
1687 * station is not allowed by the FW, delay the adding of MAC context to
1688 * the point where we can also add the bcast station.
1689 * In short: there's not much we can do at this point, other than
1690 * allocating resources :)
1691 */
Johannes Berg5023d962013-07-31 14:07:43 +02001692 if (vif->type == NL80211_IFTYPE_AP ||
1693 vif->type == NL80211_IFTYPE_ADHOC) {
Johannes Berg013290a2014-08-04 13:38:48 +02001694 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001695 if (ret) {
1696 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1697 goto out_release;
1698 }
1699
Emmanuel Grumbach77740cb2013-06-26 23:51:41 +03001700 iwl_mvm_vif_dbgfs_register(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001701 goto out_unlock;
1702 }
1703
Avri Altman93190fb2014-12-27 09:09:47 +02001704 mvmvif->features |= hw->netdev_features;
1705
Johannes Berg8ca151b2013-01-24 14:25:36 +01001706 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1707 if (ret)
1708 goto out_release;
1709
Arik Nemtsov999609f2014-05-15 17:31:51 +03001710 ret = iwl_mvm_power_update_mac(mvm);
Emmanuel Grumbache5e7aa82014-01-27 16:57:33 +02001711 if (ret)
Luciano Coelhofd66fc12015-01-27 15:06:57 +02001712 goto out_remove_mac;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001713
Hila Gonen7df15b12012-12-12 11:16:19 +02001714 /* beacon filtering */
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001715 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
Eliad Pellerbd3351b2013-07-16 17:50:17 +03001716 if (ret)
1717 goto out_remove_mac;
1718
Hila Gonen7df15b12012-12-12 11:16:19 +02001719 if (!mvm->bf_allowed_vif &&
Emmanuel Grumbach73e5f2c2014-03-30 08:57:30 +03001720 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
Hila Gonen7df15b12012-12-12 11:16:19 +02001721 mvm->bf_allowed_vif = mvmvif;
Andrei Otcheretianskia20fd392013-07-21 17:23:59 +03001722 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1723 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
Hila Gonen7df15b12012-12-12 11:16:19 +02001724 }
1725
Johannes Berg8ca151b2013-01-24 14:25:36 +01001726 /*
1727 * P2P_DEVICE interface does not have a channel context assigned to it,
1728 * so a dedicated PHY context is allocated to it and the corresponding
1729 * MAC context is bound to it at this stage.
1730 */
1731 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001732
Ilan Peerfe0f2de2013-03-21 10:23:52 +02001733 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1734 if (!mvmvif->phy_ctxt) {
1735 ret = -ENOSPC;
Eliad Pellerbd3351b2013-07-16 17:50:17 +03001736 goto out_free_bf;
Ilan Peerfe0f2de2013-03-21 10:23:52 +02001737 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001738
Ilan Peer53a9d612013-04-28 11:55:08 +03001739 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001740 ret = iwl_mvm_binding_add_vif(mvm, vif);
1741 if (ret)
Ilan Peer53a9d612013-04-28 11:55:08 +03001742 goto out_unref_phy;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001743
Johannes Berg013290a2014-08-04 13:38:48 +02001744 ret = iwl_mvm_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001745 if (ret)
1746 goto out_unbind;
1747
1748 /* Save a pointer to p2p device vif, so it can later be used to
1749 * update the p2p device MAC when a GO is started/stopped */
1750 mvm->p2p_device_vif = vif;
1751 }
1752
Johannes Berg63494372013-03-26 10:47:53 +01001753 iwl_mvm_vif_dbgfs_register(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001754 goto out_unlock;
1755
1756 out_unbind:
1757 iwl_mvm_binding_remove_vif(mvm, vif);
Ilan Peer53a9d612013-04-28 11:55:08 +03001758 out_unref_phy:
Ilan Peerfe0f2de2013-03-21 10:23:52 +02001759 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
Eliad Pellerbd3351b2013-07-16 17:50:17 +03001760 out_free_bf:
1761 if (mvm->bf_allowed_vif == mvmvif) {
1762 mvm->bf_allowed_vif = NULL;
Andrei Otcheretianskia20fd392013-07-21 17:23:59 +03001763 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1764 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
Eliad Pellerbd3351b2013-07-16 17:50:17 +03001765 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001766 out_remove_mac:
1767 mvmvif->phy_ctxt = NULL;
1768 iwl_mvm_mac_ctxt_remove(mvm, vif);
1769 out_release:
Alexander Bondar5ee2b212013-03-05 10:16:40 +02001770 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1771 mvm->vif_count--;
Alexander Bondar1c2abf72013-08-27 20:31:48 +03001772
Johannes Berg8ca151b2013-01-24 14:25:36 +01001773 iwl_mvm_mac_ctxt_release(mvm, vif);
1774 out_unlock:
1775 mutex_unlock(&mvm->mutex);
1776
Gregory Greenmand40fc482014-06-25 14:08:50 +02001777 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1778
Johannes Berg8ca151b2013-01-24 14:25:36 +01001779 return ret;
1780}
1781
Johannes Berg38a12b52013-02-22 14:07:56 +01001782static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1783 struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001784{
Arik Nemtsovd92b732e2014-09-21 19:00:42 +03001785 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001786
1787 if (tfd_msk) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001788 /*
1789 * mac80211 first removes all the stations of the vif and
1790 * then removes the vif. When it removes a station it also
1791 * flushes the AMPDU session. So by now, all the AMPDU sessions
1792 * of all the stations of this vif are closed, and the queues
1793 * of these AMPDU sessions are properly closed.
1794 * We still need to take care of the shared queues of the vif.
1795 * Flush them here.
1796 */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001797 mutex_lock(&mvm->mutex);
Luca Coelho5888a402015-10-06 09:54:57 +03001798 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001799 mutex_unlock(&mvm->mutex);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001800
1801 /*
1802 * There are transports that buffer a few frames in the host.
1803 * For these, the flush above isn't enough since while we were
1804 * flushing, the transport might have sent more frames to the
1805 * device. To solve this, wait here until the transport is
1806 * empty. Technically, this could have replaced the flush
1807 * above, but flush is much faster than draining. So flush
1808 * first, and drain to make sure we have no frames in the
1809 * transport anymore.
1810 * If a station still had frames on the shared queues, it is
1811 * already marked as draining, so to complete the draining, we
1812 * just need to wait until the transport is empty.
1813 */
1814 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001815 }
1816
1817 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1818 /*
1819 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1820 * We assume here that all the packets sent to the OFFCHANNEL
1821 * queue are sent in ROC session.
1822 */
1823 flush_work(&mvm->roc_done_wk);
1824 } else {
1825 /*
1826 * By now, all the AC queues are empty. The AGG queues are
1827 * empty too. We already got all the Tx responses for all the
1828	 * packets in the queues. The drain work might have been
Emmanuel Grumbach0742a752013-06-10 14:10:33 +03001829 * triggered. Flush it.
Johannes Berg8ca151b2013-01-24 14:25:36 +01001830 */
1831 flush_work(&mvm->sta_drained_wk);
1832 }
Johannes Berg38a12b52013-02-22 14:07:56 +01001833}
1834
1835static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1836 struct ieee80211_vif *vif)
1837{
1838 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1839 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1840
1841 iwl_mvm_prepare_mac_removal(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001842
1843 mutex_lock(&mvm->mutex);
1844
Hila Gonen7df15b12012-12-12 11:16:19 +02001845 if (mvm->bf_allowed_vif == mvmvif) {
1846 mvm->bf_allowed_vif = NULL;
Andrei Otcheretianskia20fd392013-07-21 17:23:59 +03001847 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1848 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
Hila Gonen7df15b12012-12-12 11:16:19 +02001849 }
1850
Johannes Berg63494372013-03-26 10:47:53 +01001851 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1852
Johannes Berg8ca151b2013-01-24 14:25:36 +01001853 /*
1854	 * For AP/GO interfaces, the teardown of the resources allocated to the
Johannes Berg38a12b52013-02-22 14:07:56 +01001855	 * interface is handled as part of the stop_ap flow.
Johannes Berg8ca151b2013-01-24 14:25:36 +01001856 */
Johannes Berg5023d962013-07-31 14:07:43 +02001857 if (vif->type == NL80211_IFTYPE_AP ||
1858 vif->type == NL80211_IFTYPE_ADHOC) {
David Spinadel507cadf2013-07-31 18:07:21 +03001859#ifdef CONFIG_NL80211_TESTMODE
1860 if (vif == mvm->noa_vif) {
1861 mvm->noa_vif = NULL;
1862 mvm->noa_duration = 0;
1863 }
1864#endif
Johannes Berg013290a2014-08-04 13:38:48 +02001865 iwl_mvm_dealloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001866 goto out_release;
1867 }
1868
1869 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1870 mvm->p2p_device_vif = NULL;
Johannes Berg013290a2014-08-04 13:38:48 +02001871 iwl_mvm_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001872 iwl_mvm_binding_remove_vif(mvm, vif);
Ilan Peerfe0f2de2013-03-21 10:23:52 +02001873 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001874 mvmvif->phy_ctxt = NULL;
1875 }
1876
Alexander Bondar5ee2b212013-03-05 10:16:40 +02001877 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001878 mvm->vif_count--;
Alexander Bondar1c2abf72013-08-27 20:31:48 +03001879
Arik Nemtsov999609f2014-05-15 17:31:51 +03001880 iwl_mvm_power_update_mac(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001881 iwl_mvm_mac_ctxt_remove(mvm, vif);
1882
1883out_release:
1884 iwl_mvm_mac_ctxt_release(mvm, vif);
1885 mutex_unlock(&mvm->mutex);
1886}
1887
1888static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1889{
1890 return 0;
1891}
1892
Eliad Pellere59647e2013-11-28 14:08:50 +02001893struct iwl_mvm_mc_iter_data {
1894 struct iwl_mvm *mvm;
1895 int port_id;
1896};
1897
1898static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1899 struct ieee80211_vif *vif)
1900{
1901 struct iwl_mvm_mc_iter_data *data = _data;
1902 struct iwl_mvm *mvm = data->mvm;
1903 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1904 int ret, len;
1905
1906 /* if we don't have free ports, mcast frames will be dropped */
1907 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1908 return;
1909
1910 if (vif->type != NL80211_IFTYPE_STATION ||
1911 !vif->bss_conf.assoc)
1912 return;
1913
1914 cmd->port_id = data->port_id++;
1915 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1916 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1917
Emmanuel Grumbach1c4abec2014-05-08 09:48:10 +03001918 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
Eliad Pellere59647e2013-11-28 14:08:50 +02001919 if (ret)
1920 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1921}
1922
1923static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1924{
1925 struct iwl_mvm_mc_iter_data iter_data = {
1926 .mvm = mvm,
1927 };
1928
1929 lockdep_assert_held(&mvm->mutex);
1930
1931 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1932 return;
1933
Emmanuel Grumbach1c4abec2014-05-08 09:48:10 +03001934 ieee80211_iterate_active_interfaces_atomic(
Eliad Pellere59647e2013-11-28 14:08:50 +02001935 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1936 iwl_mvm_mc_iface_iterator, &iter_data);
1937}
1938
1939static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1940 struct netdev_hw_addr_list *mc_list)
1941{
1942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1943 struct iwl_mcast_filter_cmd *cmd;
1944 struct netdev_hw_addr *addr;
Max Stepanovf3bd58f2014-08-04 13:55:01 +03001945 int addr_count;
1946 bool pass_all;
Eliad Pellere59647e2013-11-28 14:08:50 +02001947 int len;
1948
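	/*
	 * Allocate the MCAST_FILTER_CMD here and hand it back to
	 * iwl_mvm_configure_filter() through the opaque u64 multicast
	 * cookie; it is then stored in mvm->mcast_filter_cmd and freed
	 * when the next configuration replaces it.
	 */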
Max Stepanovf3bd58f2014-08-04 13:55:01 +03001949 addr_count = netdev_hw_addr_list_count(mc_list);
1950 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1951 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1952 if (pass_all)
Eliad Pellere59647e2013-11-28 14:08:50 +02001953 addr_count = 0;
Eliad Pellere59647e2013-11-28 14:08:50 +02001954
1955 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1956 cmd = kzalloc(len, GFP_ATOMIC);
1957 if (!cmd)
1958 return 0;
1959
1960 if (pass_all) {
1961 cmd->pass_all = 1;
1962 return (u64)(unsigned long)cmd;
1963 }
1964
1965 netdev_hw_addr_list_for_each(addr, mc_list) {
1966 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1967 cmd->count, addr->addr);
1968 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1969 addr->addr, ETH_ALEN);
1970 cmd->count++;
1971 }
1972
1973 return (u64)(unsigned long)cmd;
1974}
1975
Johannes Berg8ca151b2013-01-24 14:25:36 +01001976static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1977 unsigned int changed_flags,
1978 unsigned int *total_flags,
1979 u64 multicast)
1980{
Eliad Pellere59647e2013-11-28 14:08:50 +02001981 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1982 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1983
1984 mutex_lock(&mvm->mutex);
1985
1986 /* replace previous configuration */
1987 kfree(mvm->mcast_filter_cmd);
1988 mvm->mcast_filter_cmd = cmd;
1989
1990 if (!cmd)
1991 goto out;
1992
1993 iwl_mvm_recalc_multicast(mvm);
1994out:
1995 mutex_unlock(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001996 *total_flags = 0;
1997}
1998
Andrei Otcheretianskieffd1922015-06-30 12:08:28 +03001999static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
2000 struct ieee80211_vif *vif,
2001 unsigned int filter_flags,
2002 unsigned int changed_flags)
2003{
2004 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2005
2006	/* We only support filtering of probe requests */
2007 if (!(changed_flags & FIF_PROBE_REQ))
2008 return;
2009
2010 /* Supported only for p2p client interfaces */
2011 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
2012 !vif->p2p)
2013 return;
2014
2015 mutex_lock(&mvm->mutex);
2016 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2017 mutex_unlock(&mvm->mutex);
2018}
2019
Eliad Pellerc87163b2014-01-08 10:11:11 +02002020#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
2021struct iwl_bcast_iter_data {
2022 struct iwl_mvm *mvm;
2023 struct iwl_bcast_filter_cmd *cmd;
2024 u8 current_filter;
2025};
2026
2027static void
2028iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
2029 const struct iwl_fw_bcast_filter *in_filter,
2030 struct iwl_fw_bcast_filter *out_filter)
2031{
2032 struct iwl_fw_bcast_filter_attr *attr;
2033 int i;
2034
2035 memcpy(out_filter, in_filter, sizeof(*out_filter));
2036
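	/*
	 * attr->reserved1 carries a driver-internal magic marker: patch in
	 * the vif's current IPv4 address or the last four bytes of its MAC
	 * address accordingly, then clear the marker before the command
	 * reaches the firmware.
	 */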
2037 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
2038 attr = &out_filter->attrs[i];
2039
2040 if (!attr->mask)
2041 break;
2042
Eliad Peller2ee8f022014-01-13 19:07:09 +02002043 switch (attr->reserved1) {
2044 case cpu_to_le16(BC_FILTER_MAGIC_IP):
2045 if (vif->bss_conf.arp_addr_cnt != 1) {
2046 attr->mask = 0;
2047 continue;
2048 }
2049
2050 attr->val = vif->bss_conf.arp_addr_list[0];
2051 break;
2052 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
2053 attr->val = *(__be32 *)&vif->addr[2];
2054 break;
2055 default:
2056 break;
2057 }
2058 attr->reserved1 = 0;
Eliad Pellerc87163b2014-01-08 10:11:11 +02002059 out_filter->num_attrs++;
2060 }
2061}
2062
2063static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
2064 struct ieee80211_vif *vif)
2065{
2066 struct iwl_bcast_iter_data *data = _data;
2067 struct iwl_mvm *mvm = data->mvm;
2068 struct iwl_bcast_filter_cmd *cmd = data->cmd;
2069 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2070 struct iwl_fw_bcast_mac *bcast_mac;
2071 int i;
2072
2073 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
2074 return;
2075
2076 bcast_mac = &cmd->macs[mvmvif->id];
2077
Ilan Peere48393e2014-05-22 11:19:02 +03002078 /*
2079 * enable filtering only for associated stations, but not for P2P
2080 * Clients
2081 */
2082 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
2083 !vif->bss_conf.assoc)
Eliad Pellerc87163b2014-01-08 10:11:11 +02002084 return;
2085
2086 bcast_mac->default_discard = 1;
2087
2088 /* copy all configured filters */
2089 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
2090 /*
2091 * Make sure we don't exceed our filters limit.
2092	 * If there is still a valid filter to be configured,
2093 * be on the safe side and just allow bcast for this mac.
2094 */
2095 if (WARN_ON_ONCE(data->current_filter >=
2096 ARRAY_SIZE(cmd->filters))) {
2097 bcast_mac->default_discard = 0;
2098 bcast_mac->attached_filters = 0;
2099 break;
2100 }
2101
2102 iwl_mvm_set_bcast_filter(vif,
2103 &mvm->bcast_filters[i],
2104 &cmd->filters[data->current_filter]);
2105
2106 /* skip current filter if it contains no attributes */
2107 if (!cmd->filters[data->current_filter].num_attrs)
2108 continue;
2109
2110 /* attach the filter to current mac */
2111 bcast_mac->attached_filters |=
2112 cpu_to_le16(BIT(data->current_filter));
2113
2114 data->current_filter++;
2115 }
2116}
2117
Eliad Pellerde06a592014-01-08 10:11:12 +02002118bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
2119 struct iwl_bcast_filter_cmd *cmd)
Eliad Pellerc87163b2014-01-08 10:11:11 +02002120{
Eliad Pellerc87163b2014-01-08 10:11:11 +02002121 struct iwl_bcast_iter_data iter_data = {
2122 .mvm = mvm,
Eliad Pellerde06a592014-01-08 10:11:12 +02002123 .cmd = cmd,
Eliad Pellerc87163b2014-01-08 10:11:11 +02002124 };
2125
Max Stepanov3b8983b2014-10-15 11:27:16 +03002126 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
2127 return false;
2128
Eliad Pellerde06a592014-01-08 10:11:12 +02002129 memset(cmd, 0, sizeof(*cmd));
2130 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
2131 cmd->max_macs = ARRAY_SIZE(cmd->macs);
2132
2133#ifdef CONFIG_IWLWIFI_DEBUGFS
2134 /* use debugfs filters/macs if override is configured */
2135 if (mvm->dbgfs_bcast_filtering.override) {
2136 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
2137 sizeof(cmd->filters));
2138 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
2139 sizeof(cmd->macs));
2140 return true;
2141 }
2142#endif
Eliad Pellerc87163b2014-01-08 10:11:11 +02002143
2144 /* if no filters are configured, do nothing */
2145 if (!mvm->bcast_filters)
Eliad Pellerde06a592014-01-08 10:11:12 +02002146 return false;
Eliad Pellerc87163b2014-01-08 10:11:11 +02002147
2148 /* configure and attach these filters for each associated sta vif */
2149 ieee80211_iterate_active_interfaces(
2150 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2151 iwl_mvm_bcast_filter_iterator, &iter_data);
2152
Eliad Pellerde06a592014-01-08 10:11:12 +02002153 return true;
2154}
2155static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2156 struct ieee80211_vif *vif)
2157{
2158 struct iwl_bcast_filter_cmd cmd;
2159
2160 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2161 return 0;
2162
2163 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2164 return 0;
2165
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002166 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
Eliad Pellerc87163b2014-01-08 10:11:11 +02002167 sizeof(cmd), &cmd);
2168}
2169#else
2170static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2171 struct ieee80211_vif *vif)
2172{
2173 return 0;
2174}
2175#endif
2176
Johannes Berg8ca151b2013-01-24 14:25:36 +01002177static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2178 struct ieee80211_vif *vif,
2179 struct ieee80211_bss_conf *bss_conf,
2180 u32 changes)
2181{
2182 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2183 int ret;
2184
Ilan Peer6e97b0d2013-12-23 22:18:02 +02002185 /*
2186 * Re-calculate the tsf id, as the master-slave relations depend on the
2187 * beacon interval, which was not known when the station interface was
2188 * added.
2189 */
2190 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
2191 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2192
Johannes Berg3dfd3a92014-08-11 21:37:30 +02002193 /*
2194 * If we're not associated yet, take the (new) BSSID before associating
2195 * so the firmware knows. If we're already associated, then use the old
2196 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2197 * branch for disassociation below.
2198 */
2199 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2200 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2201
2202 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002203 if (ret)
2204 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2205
Johannes Berg3dfd3a92014-08-11 21:37:30 +02002206 /* after sending it once, adopt mac80211 data */
2207 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2208 mvmvif->associated = bss_conf->assoc;
2209
Johannes Berg8ca151b2013-01-24 14:25:36 +01002210 if (changes & BSS_CHANGED_ASSOC) {
2211 if (bss_conf->assoc) {
Johannes Berg33cef922015-01-21 21:41:29 +01002212 /* clear statistics to get clean beacon counter */
2213 iwl_mvm_request_statistics(mvm, true);
2214 memset(&mvmvif->beacon_stats, 0,
2215 sizeof(mvmvif->beacon_stats));
2216
Johannes Berg8ca151b2013-01-24 14:25:36 +01002217 /* add quota for this interface */
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02002218 ret = iwl_mvm_update_quotas(mvm, true, NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002219 if (ret) {
2220 IWL_ERR(mvm, "failed to update quotas\n");
2221 return;
2222 }
Johannes Berg016d27e2013-05-03 11:16:15 +02002223
2224 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2225 &mvm->status)) {
2226 /*
2227 * If we're restarting then the firmware will
2228 * obviously have lost synchronisation with
2229 * the AP. It will attempt to synchronise by
2230 * itself, but we can make it more reliable by
2231 * scheduling a session protection time event.
2232 *
2233 * The firmware needs to receive a beacon to
2234 * catch up with synchronisation, use 110% of
2235 * the beacon interval.
2236 *
2237 * Set a large maximum delay to allow for more
2238 * than a single interface.
2239 */
2240 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2241 iwl_mvm_protect_session(mvm, vif, dur, dur,
Liad Kaufmand20d37b2014-07-06 17:14:39 +03002242 5 * dur, false);
Johannes Berg016d27e2013-05-03 11:16:15 +02002243 }
Lilach Edelstein1f3b0ff2013-10-06 13:03:32 +02002244
2245 iwl_mvm_sf_update(mvm, vif, false);
Alexander Bondar175a70b2013-04-14 20:59:37 +03002246 iwl_mvm_power_vif_assoc(mvm, vif);
Emmanuel Grumbach697162a2014-07-30 15:56:42 +03002247 if (vif->p2p) {
Eliad Peller29a90a42013-11-05 14:06:29 +02002248 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
Emmanuel Grumbach697162a2014-07-30 15:56:42 +03002249 iwl_mvm_update_smps(mvm, vif,
2250 IWL_MVM_SMPS_REQ_PROT,
2251 IEEE80211_SMPS_DYNAMIC);
2252 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002253 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
Lilach Edelstein1f3b0ff2013-10-06 13:03:32 +02002254 /*
2255 * If update fails - SF might be running in associated
2256 * mode while disassociated - which is forbidden.
2257 */
2258 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2259 "Failed to update SF upon disassociation\n");
2260
Johannes Berg8ca151b2013-01-24 14:25:36 +01002261 /* remove AP station now that the MAC is unassoc */
2262 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
2263 if (ret)
2264 IWL_ERR(mvm, "failed to remove AP station\n");
Eliad Peller37577fe2013-12-05 17:19:39 +02002265
2266 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2267 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002268 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
2269 /* remove quota for this interface */
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02002270 ret = iwl_mvm_update_quotas(mvm, false, NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002271 if (ret)
2272 IWL_ERR(mvm, "failed to update quotas\n");
Eliad Peller29a90a42013-11-05 14:06:29 +02002273
2274 if (vif->p2p)
2275 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
Johannes Berg3dfd3a92014-08-11 21:37:30 +02002276
2277 /* this will take the cleared BSSID from bss_conf */
2278 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2279 if (ret)
2280 IWL_ERR(mvm,
2281 "failed to update MAC %pM (clear after unassoc)\n",
2282 vif->addr);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002283 }
Andrei Otcheretianskia20fd392013-07-21 17:23:59 +03002284
Eliad Pellere59647e2013-11-28 14:08:50 +02002285 iwl_mvm_recalc_multicast(mvm);
Eliad Pellerc87163b2014-01-08 10:11:11 +02002286 iwl_mvm_configure_bcast_filter(mvm, vif);
Eliad Pellere59647e2013-11-28 14:08:50 +02002287
Andrei Otcheretianskia20fd392013-07-21 17:23:59 +03002288 /* reset rssi values */
2289 mvmvif->bf_data.ave_beacon_signal = 0;
2290
Emmanuel Grumbach8e484f02013-10-02 15:02:25 +03002291 iwl_mvm_bt_coex_vif_change(mvm);
Emmanuel Grumbachf94045e2014-01-06 13:38:55 +02002292 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2293 IEEE80211_SMPS_AUTOMATIC);
Alexander Bondar989c6502013-05-16 17:34:17 +03002294 } else if (changes & BSS_CHANGED_BEACON_INFO) {
Johannes Berg210a5442013-01-24 23:48:23 +01002295 /*
2296 * We received a beacon _after_ association so
2297 * remove the session protection.
2298 */
2299 iwl_mvm_remove_time_event(mvm, mvmvif,
2300 &mvmvif->time_event_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002301 }
Eran Hararycc87d322014-07-15 14:04:23 +03002302
2303 if (changes & BSS_CHANGED_BEACON_INFO) {
2304 iwl_mvm_sf_update(mvm, vif, false);
2305 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2306 }
2307
Johannes Berg1bc10d32014-08-26 14:25:46 +02002308 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
2309 ret = iwl_mvm_power_update_mac(mvm);
2310 if (ret)
2311 IWL_ERR(mvm, "failed to update power mode\n");
2312 }
2313
Matti Gottlieb88f2fd72013-07-09 15:25:46 +03002314 if (changes & BSS_CHANGED_TXPOWER) {
2315 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2316 bss_conf->txpower);
2317 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2318 }
Andrei Otcheretianskia20fd392013-07-21 17:23:59 +03002319
2320 if (changes & BSS_CHANGED_CQM) {
Johannes Berg3c6acb62014-05-07 11:47:53 +02002321 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
Andrei Otcheretianskia20fd392013-07-21 17:23:59 +03002322 /* reset cqm events tracking */
2323 mvmvif->bf_data.last_cqm_event = 0;
Avri Altmanfa7b2e72014-05-20 08:03:24 +03002324 if (mvmvif->bf_data.bf_enabled) {
2325 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2326 if (ret)
2327 IWL_ERR(mvm,
2328 "failed to update CQM thresholds\n");
2329 }
Andrei Otcheretianskia20fd392013-07-21 17:23:59 +03002330 }
Eliad Peller2ee8f022014-01-13 19:07:09 +02002331
2332 if (changes & BSS_CHANGED_ARP_FILTER) {
Johannes Berg3c6acb62014-05-07 11:47:53 +02002333 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
Eliad Peller2ee8f022014-01-13 19:07:09 +02002334 iwl_mvm_configure_bcast_filter(mvm, vif);
2335 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002336}
2337
Johannes Berg5023d962013-07-31 14:07:43 +02002338static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2339 struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002340{
2341 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2342 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2343 int ret;
2344
Eliad Peller576eeee2014-07-01 18:38:38 +03002345 /*
2346 * iwl_mvm_mac_ctxt_add() might read directly from the device
2347 * (the system time), so make sure it is available.
2348 */
2349 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2350 if (ret)
2351 return ret;
2352
Johannes Berg8ca151b2013-01-24 14:25:36 +01002353 mutex_lock(&mvm->mutex);
2354
2355 /* Send the beacon template */
2356 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2357 if (ret)
2358 goto out_unlock;
2359
Ilan Peer6e97b0d2013-12-23 22:18:02 +02002360 /*
2361 * Re-calculate the tsf id, as the master-slave relations depend on the
2362 * beacon interval, which was not known when the AP interface was added.
2363 */
2364 if (vif->type == NL80211_IFTYPE_AP)
2365 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2366
Gregory Greenman94939082015-08-24 14:38:35 +03002367 mvmvif->ap_assoc_sta_count = 0;
2368
Johannes Berg8ca151b2013-01-24 14:25:36 +01002369 /* Add the mac context */
2370 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2371 if (ret)
2372 goto out_unlock;
2373
2374 /* Perform the binding */
2375 ret = iwl_mvm_binding_add_vif(mvm, vif);
2376 if (ret)
2377 goto out_remove;
2378
Johannes Berg8ca151b2013-01-24 14:25:36 +01002379 /* Send the bcast station. At this stage the TBTT and DTIM time events
2380 * are added and applied to the scheduler */
Johannes Berg013290a2014-08-04 13:38:48 +02002381 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002382 if (ret)
2383 goto out_unbind;
2384
Ilan Peer5691e212013-12-31 21:05:50 +02002385 /* must be set before quota calculations */
2386 mvmvif->ap_ibss_active = true;
2387
Ilan Peera11e1442013-12-31 21:19:55 +02002388	/* power update needs to be done before quotas */
Arik Nemtsov999609f2014-05-15 17:31:51 +03002389 iwl_mvm_power_update_mac(mvm);
Ilan Peera11e1442013-12-31 21:19:55 +02002390
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02002391 ret = iwl_mvm_update_quotas(mvm, false, NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002392 if (ret)
Ilan Peera11e1442013-12-31 21:19:55 +02002393 goto out_quota_failed;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002394
Johannes Berg5023d962013-07-31 14:07:43 +02002395 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002396 if (vif->p2p && mvm->p2p_device_vif)
Johannes Berg3dfd3a92014-08-11 21:37:30 +02002397 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002398
Eliad Peller29a90a42013-11-05 14:06:29 +02002399 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2400
Emmanuel Grumbach8e484f02013-10-02 15:02:25 +03002401 iwl_mvm_bt_coex_vif_change(mvm);
Emmanuel Grumbachdac94da2013-06-18 07:35:27 +03002402
Arik Nemtsovf6972672014-06-15 16:03:55 +03002403 /* we don't support TDLS during DCM */
2404 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2405 iwl_mvm_teardown_tdls_peers(mvm);
2406
Arik Nemtsov939e4902015-03-08 12:19:42 +02002407 goto out_unlock;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002408
Ilan Peera11e1442013-12-31 21:19:55 +02002409out_quota_failed:
Arik Nemtsov999609f2014-05-15 17:31:51 +03002410 iwl_mvm_power_update_mac(mvm);
Ilan Peer5691e212013-12-31 21:05:50 +02002411 mvmvif->ap_ibss_active = false;
Johannes Berg013290a2014-08-04 13:38:48 +02002412 iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002413out_unbind:
2414 iwl_mvm_binding_remove_vif(mvm, vif);
2415out_remove:
2416 iwl_mvm_mac_ctxt_remove(mvm, vif);
2417out_unlock:
2418 mutex_unlock(&mvm->mutex);
Eliad Peller576eeee2014-07-01 18:38:38 +03002419 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002420 return ret;
2421}
2422
Johannes Berg5023d962013-07-31 14:07:43 +02002423static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2424 struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002425{
2426 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2427 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2428
Johannes Berg38a12b52013-02-22 14:07:56 +01002429 iwl_mvm_prepare_mac_removal(mvm, vif);
2430
Johannes Berg8ca151b2013-01-24 14:25:36 +01002431 mutex_lock(&mvm->mutex);
2432
Andrei Otcheretianski664322f2014-06-05 16:40:36 +03002433 /* Handle AP stop while in CSA */
Andrei Otcheretianski7f0a7c62014-05-04 11:48:12 +03002434 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2435 iwl_mvm_remove_time_event(mvm, mvmvif,
2436 &mvmvif->time_event_data);
Andrei Otcheretianski664322f2014-06-05 16:40:36 +03002437 RCU_INIT_POINTER(mvm->csa_vif, NULL);
Avraham Sterne9cb0322015-08-31 11:08:27 +03002438 mvmvif->csa_countdown = false;
Andrei Otcheretianski7f0a7c62014-05-04 11:48:12 +03002439 }
Andrei Otcheretianski664322f2014-06-05 16:40:36 +03002440
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03002441 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2442 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2443 mvm->csa_tx_block_bcn_timeout = 0;
2444 }
2445
Johannes Berg5023d962013-07-31 14:07:43 +02002446 mvmvif->ap_ibss_active = false;
David Spinadel1c87bba2014-02-27 16:41:52 +02002447 mvm->ap_last_beacon_gp2 = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002448
Emmanuel Grumbach8e484f02013-10-02 15:02:25 +03002449 iwl_mvm_bt_coex_vif_change(mvm);
Emmanuel Grumbachdac94da2013-06-18 07:35:27 +03002450
Eliad Peller29a90a42013-11-05 14:06:29 +02002451 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2452
Johannes Berg5023d962013-07-31 14:07:43 +02002453 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002454 if (vif->p2p && mvm->p2p_device_vif)
Johannes Berg3dfd3a92014-08-11 21:37:30 +02002455 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002456
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02002457 iwl_mvm_update_quotas(mvm, false, NULL);
Johannes Berg013290a2014-08-04 13:38:48 +02002458 iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002459 iwl_mvm_binding_remove_vif(mvm, vif);
Ilan Peera11e1442013-12-31 21:19:55 +02002460
Arik Nemtsov999609f2014-05-15 17:31:51 +03002461 iwl_mvm_power_update_mac(mvm);
Ilan Peera11e1442013-12-31 21:19:55 +02002462
Johannes Berg8ca151b2013-01-24 14:25:36 +01002463 iwl_mvm_mac_ctxt_remove(mvm, vif);
2464
2465 mutex_unlock(&mvm->mutex);
2466}
2467
Johannes Berg5023d962013-07-31 14:07:43 +02002468static void
2469iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2470 struct ieee80211_vif *vif,
2471 struct ieee80211_bss_conf *bss_conf,
2472 u32 changes)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002473{
Ilan Peerbe2056f2013-12-04 16:47:14 +02002474 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Avri Altman8a5e3662013-11-12 19:16:03 +02002475
Ilan Peerbe2056f2013-12-04 16:47:14 +02002476 /* Changes will be applied when the AP/IBSS is started */
2477 if (!mvmvif->ap_ibss_active)
2478 return;
2479
Johannes Berg863230da2013-12-04 17:08:40 +01002480 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
Johannes Bergf7d8b702014-09-09 15:49:19 +02002481 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
Johannes Berg3dfd3a92014-08-11 21:37:30 +02002482 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
Johannes Berg863230da2013-12-04 17:08:40 +01002483 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
Avri Altman8a5e3662013-11-12 19:16:03 +02002484
Johannes Berg8ca151b2013-01-24 14:25:36 +01002485 /* Need to send a new beacon template to the FW */
Johannes Berg863230da2013-12-04 17:08:40 +01002486 if (changes & BSS_CHANGED_BEACON &&
2487 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2488 IWL_WARN(mvm, "Failed updating beacon data\n");
Haim Dreyfuss79b7a692014-09-14 12:40:00 +03002489
2490 if (changes & BSS_CHANGED_TXPOWER) {
2491 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2492 bss_conf->txpower);
2493 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2494 }
2495
Johannes Berg8ca151b2013-01-24 14:25:36 +01002496}
2497
2498static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2499 struct ieee80211_vif *vif,
2500 struct ieee80211_bss_conf *bss_conf,
2501 u32 changes)
2502{
2503 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2504
Eliad Peller576eeee2014-07-01 18:38:38 +03002505 /*
2506 * iwl_mvm_bss_info_changed_station() might call
2507 * iwl_mvm_protect_session(), which reads directly from
2508 * the device (the system time), so make sure it is available.
2509 */
2510 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2511 return;
2512
Johannes Berg8ca151b2013-01-24 14:25:36 +01002513 mutex_lock(&mvm->mutex);
2514
David Spinadel723f02e2014-04-27 09:54:54 +03002515 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
Luciano Coelhoc7d42482015-05-07 16:00:26 +03002516 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
David Spinadel723f02e2014-04-27 09:54:54 +03002517
Johannes Berg8ca151b2013-01-24 14:25:36 +01002518 switch (vif->type) {
2519 case NL80211_IFTYPE_STATION:
2520 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2521 break;
2522 case NL80211_IFTYPE_AP:
Johannes Berg5023d962013-07-31 14:07:43 +02002523 case NL80211_IFTYPE_ADHOC:
2524 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002525 break;
2526 default:
2527 /* shouldn't happen */
2528 WARN_ON_ONCE(1);
2529 }
2530
2531 mutex_unlock(&mvm->mutex);
Eliad Peller576eeee2014-07-01 18:38:38 +03002532 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002533}
2534
2535static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2536 struct ieee80211_vif *vif,
David Spinadelc56ef672014-02-05 15:21:13 +02002537 struct ieee80211_scan_request *hw_req)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002538{
2539 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2540 int ret;
2541
Luciano Coelho6749dd82015-03-20 15:51:36 +02002542 if (hw_req->req.n_channels == 0 ||
2543 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002544 return -EINVAL;
2545
2546 mutex_lock(&mvm->mutex);
Luciano Coelho6749dd82015-03-20 15:51:36 +02002547 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002548 mutex_unlock(&mvm->mutex);
Luciano Coelho6749dd82015-03-20 15:51:36 +02002549
Johannes Berg8ca151b2013-01-24 14:25:36 +01002550 return ret;
2551}
2552
2553static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2554 struct ieee80211_vif *vif)
2555{
2556 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2557
2558 mutex_lock(&mvm->mutex);
2559
Luciano Coelhoe7d3aba2015-02-06 10:19:05 +02002560 /* Due to a race condition, it's possible that mac80211 asks
2561 * us to stop a hw_scan when it's already stopped. This can
2562 * happen, for instance, if we stopped the scan ourselves,
2563 * called ieee80211_scan_completed() and the userspace called
2564	 * cancel scan before ieee80211_scan_work() could run.
2565 * To handle that, simply return if the scan is not running.
2566 */
Luciano Coelho262888f2015-05-07 17:21:09 +03002567 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
Luciano Coelhoc7d42482015-05-07 16:00:26 +03002568 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002569
2570 mutex_unlock(&mvm->mutex);
2571}
2572
2573static void
2574iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
Johannes Berg3e56ead2013-02-15 22:23:18 +01002575 struct ieee80211_sta *sta, u16 tids,
Johannes Berg8ca151b2013-01-24 14:25:36 +01002576 int num_frames,
2577 enum ieee80211_frame_release_type reason,
2578 bool more_data)
2579{
2580 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002581
Johannes Berg3e56ead2013-02-15 22:23:18 +01002582 /* Called when we need to transmit (a) frame(s) from mac80211 */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002583
Johannes Berg3e56ead2013-02-15 22:23:18 +01002584 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2585 tids, more_data, false);
2586}
2587
2588static void
2589iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2590 struct ieee80211_sta *sta, u16 tids,
2591 int num_frames,
2592 enum ieee80211_frame_release_type reason,
2593 bool more_data)
2594{
2595 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2596
2597 /* Called when we need to transmit (a) frame(s) from agg queue */
2598
2599 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2600 tids, more_data, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002601}
2602
2603static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2604 struct ieee80211_vif *vif,
2605 enum sta_notify_cmd cmd,
2606 struct ieee80211_sta *sta)
2607{
2608 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Johannes Berg5b577a92013-11-14 18:20:04 +01002609 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachc22b0ff2015-01-22 13:15:15 +02002610 unsigned long txqs = 0, tids = 0;
Johannes Berg3e56ead2013-02-15 22:23:18 +01002611 int tid;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002612
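	/*
	 * Collect the TX queues and TIDs that still have frames pending for
	 * this station, so the stuck-queue timer can be frozen while the
	 * station sleeps and re-armed once it wakes up.
	 */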
Emmanuel Grumbachc22b0ff2015-01-22 13:15:15 +02002613 spin_lock_bh(&mvmsta->lock);
2614 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2615 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2616
2617 if (tid_data->state != IWL_AGG_ON &&
2618 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2619 continue;
2620
2621 __set_bit(tid_data->txq_id, &txqs);
2622
2623 if (iwl_mvm_tid_queued(tid_data) == 0)
2624 continue;
2625
2626 __set_bit(tid, &tids);
2627 }
2628
Johannes Berg8ca151b2013-01-24 14:25:36 +01002629 switch (cmd) {
2630 case STA_NOTIFY_SLEEP:
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03002631 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002632 ieee80211_sta_block_awake(hw, sta, true);
Johannes Berg3e56ead2013-02-15 22:23:18 +01002633
Emmanuel Grumbachc22b0ff2015-01-22 13:15:15 +02002634 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
Johannes Berg3e56ead2013-02-15 22:23:18 +01002635 ieee80211_sta_set_buffered(sta, tid, true);
Emmanuel Grumbachc22b0ff2015-01-22 13:15:15 +02002636
2637 if (txqs)
2638 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002639 /*
2640 * The fw updates the STA to be asleep. Tx packets on the Tx
2641 * queues to this station will not be transmitted. The fw will
2642 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2643 */
2644 break;
2645 case STA_NOTIFY_AWAKE:
Emmanuel Grumbach881acd82013-03-19 16:16:00 +02002646 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002647 break;
Emmanuel Grumbachc22b0ff2015-01-22 13:15:15 +02002648
2649 if (txqs)
2650 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
Johannes Berg9cc40712013-02-15 22:47:48 +01002651 iwl_mvm_sta_modify_ps_wake(mvm, sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002652 break;
2653 default:
2654 break;
2655 }
Emmanuel Grumbachc22b0ff2015-01-22 13:15:15 +02002656 spin_unlock_bh(&mvmsta->lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002657}
2658
Johannes Berg1ddbbb02013-12-04 22:39:17 +01002659static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2660 struct ieee80211_vif *vif,
2661 struct ieee80211_sta *sta)
2662{
2663 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Gregory Greenman94939082015-08-24 14:38:35 +03002664 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002665 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg1ddbbb02013-12-04 22:39:17 +01002666
2667 /*
2668 * This is called before mac80211 does RCU synchronisation,
2669 * so here we already invalidate our internal RCU-protected
2670 * station pointer. The rest of the code will thus no longer
2671 * be able to find the station this way, and we don't rely
2672 * on further RCU synchronisation after the sta_state()
2673 * callback deleted the station.
2674 */
2675 mutex_lock(&mvm->mutex);
2676 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2677 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2678 ERR_PTR(-ENOENT));
Gregory Greenman94939082015-08-24 14:38:35 +03002679
2680 if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
2681 mvmvif->ap_assoc_sta_count--;
Ilan Peerf82c8332015-09-10 12:54:38 +03002682 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
Gregory Greenman94939082015-08-24 14:38:35 +03002683 }
2684
Johannes Berg1ddbbb02013-12-04 22:39:17 +01002685 mutex_unlock(&mvm->mutex);
2686}
2687
Johannes Bergbd1ba662014-11-13 20:53:45 +01002688static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2689 const u8 *bssid)
2690{
2691 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2692 return;
2693
2694 if (iwlwifi_mod_params.uapsd_disable) {
2695 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2696 return;
2697 }
2698
2699 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2700}
2701
Johannes Berg8ca151b2013-01-24 14:25:36 +01002702static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2703 struct ieee80211_vif *vif,
2704 struct ieee80211_sta *sta,
2705 enum ieee80211_sta_state old_state,
2706 enum ieee80211_sta_state new_state)
2707{
2708 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2709 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2710 int ret;
2711
2712 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2713 sta->addr, old_state, new_state);
2714
2715 /* this would be a mac80211 bug ... but don't crash */
2716 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2717 return -EINVAL;
2718
2719 /* if a STA is being removed, reuse its ID */
2720 flush_work(&mvm->sta_drained_wk);
2721
2722 mutex_lock(&mvm->mutex);
2723 if (old_state == IEEE80211_STA_NOTEXIST &&
2724 new_state == IEEE80211_STA_NONE) {
Johannes Berg48bc1302013-07-04 15:55:29 +02002725 /*
2726 * Firmware bug - it'll crash if the beacon interval is less
2727 * than 16. We can't avoid connecting at all, so refuse the
2728 * station state change, this will cause mac80211 to abandon
2729 * attempts to connect to this AP, and eventually wpa_s will
2730 * blacklist the AP...
2731 */
2732 if (vif->type == NL80211_IFTYPE_STATION &&
2733 vif->bss_conf.beacon_int < 16) {
2734 IWL_ERR(mvm,
2735 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2736 sta->addr, vif->bss_conf.beacon_int);
2737 ret = -EINVAL;
2738 goto out_unlock;
2739 }
Arik Nemtsovcf7b4912014-05-15 11:44:40 +03002740
2741 if (sta->tdls &&
2742 (vif->p2p ||
Arik Nemtsovfa3d07e2014-05-15 18:59:32 +03002743 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2744 IWL_MVM_TDLS_STA_COUNT ||
Arik Nemtsovcf7b4912014-05-15 11:44:40 +03002745 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2746 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2747 ret = -EBUSY;
2748 goto out_unlock;
2749 }
2750
Johannes Berg8ca151b2013-01-24 14:25:36 +01002751 ret = iwl_mvm_add_sta(mvm, vif, sta);
Arik Nemtsovfa3d07e2014-05-15 18:59:32 +03002752 if (sta->tdls && ret == 0)
2753 iwl_mvm_recalc_tdls_state(mvm, vif, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002754 } else if (old_state == IEEE80211_STA_NONE &&
2755 new_state == IEEE80211_STA_AUTH) {
Haim Dreyfusse820c2d2014-04-06 11:19:09 +03002756 /*
2757 * EBS may be disabled due to previous failures reported by FW.
2758 * Reset EBS status here assuming environment has been changed.
2759 */
2760 mvm->last_ebs_successful = true;
Johannes Bergbd1ba662014-11-13 20:53:45 +01002761 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002762 ret = 0;
2763 } else if (old_state == IEEE80211_STA_AUTH &&
2764 new_state == IEEE80211_STA_ASSOC) {
Johannes Berg7a453972013-02-12 13:10:44 +01002765 ret = iwl_mvm_update_sta(mvm, vif, sta);
2766 if (ret == 0)
2767 iwl_mvm_rs_rate_init(mvm, sta,
Eyal Shapirab87c2172013-11-09 23:37:55 +02002768 mvmvif->phy_ctxt->channel->band,
2769 true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002770 } else if (old_state == IEEE80211_STA_ASSOC &&
2771 new_state == IEEE80211_STA_AUTHORIZED) {
Arik Nemtsovf59e0e3c2014-06-10 19:56:27 +03002772
2773 /* we don't support TDLS during DCM */
2774 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2775 iwl_mvm_teardown_tdls_peers(mvm);
2776
Hila Gonen7df15b12012-12-12 11:16:19 +02002777 /* enable beacon filtering */
Avri Altmanfa7b2e72014-05-20 08:03:24 +03002778 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
Johannes Berg8ca151b2013-01-24 14:25:36 +01002779 ret = 0;
2780 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2781 new_state == IEEE80211_STA_ASSOC) {
Hila Gonen7df15b12012-12-12 11:16:19 +02002782 /* disable beacon filtering */
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002783 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
Johannes Berg8ca151b2013-01-24 14:25:36 +01002784 ret = 0;
2785 } else if (old_state == IEEE80211_STA_ASSOC &&
2786 new_state == IEEE80211_STA_AUTH) {
2787 ret = 0;
2788 } else if (old_state == IEEE80211_STA_AUTH &&
2789 new_state == IEEE80211_STA_NONE) {
2790 ret = 0;
2791 } else if (old_state == IEEE80211_STA_NONE &&
2792 new_state == IEEE80211_STA_NOTEXIST) {
2793 ret = iwl_mvm_rm_sta(mvm, vif, sta);
Arik Nemtsovfa3d07e2014-05-15 18:59:32 +03002794 if (sta->tdls)
2795 iwl_mvm_recalc_tdls_state(mvm, vif, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002796 } else {
2797 ret = -EIO;
2798 }
Johannes Berg48bc1302013-07-04 15:55:29 +02002799 out_unlock:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002800 mutex_unlock(&mvm->mutex);
2801
Liad Kaufman9c126cd2014-10-06 19:08:56 +02002802 if (sta->tdls && ret == 0) {
2803 if (old_state == IEEE80211_STA_NOTEXIST &&
2804 new_state == IEEE80211_STA_NONE)
2805 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2806 else if (old_state == IEEE80211_STA_NONE &&
2807 new_state == IEEE80211_STA_NOTEXIST)
2808 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2809 }
2810
Johannes Berg8ca151b2013-01-24 14:25:36 +01002811 return ret;
2812}
2813
2814static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2815{
2816 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2817
2818 mvm->rts_threshold = value;
2819
2820 return 0;
2821}
2822
Lilach Edelstein1f3b0ff2013-10-06 13:03:32 +02002823static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2824 struct ieee80211_vif *vif,
2825 struct ieee80211_sta *sta, u32 changed)
2826{
2827 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2828
2829 if (vif->type == NL80211_IFTYPE_STATION &&
2830 changed & IEEE80211_RC_NSS_CHANGED)
2831 iwl_mvm_sf_update(mvm, vif, false);
2832}
2833
Johannes Berg8ca151b2013-01-24 14:25:36 +01002834static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2835 struct ieee80211_vif *vif, u16 ac,
2836 const struct ieee80211_tx_queue_params *params)
2837{
2838 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2839 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2840
2841 mvmvif->queue_params[ac] = *params;
2842
2843 /*
2844 * No need to update right away, we'll get BSS_CHANGED_QOS
2845 * The exception is P2P_DEVICE interface which needs immediate update.
2846 */
2847 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2848 int ret;
2849
2850 mutex_lock(&mvm->mutex);
Johannes Berg3dfd3a92014-08-11 21:37:30 +02002851 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002852 mutex_unlock(&mvm->mutex);
2853 return ret;
2854 }
2855 return 0;
2856}
2857
2858static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2859 struct ieee80211_vif *vif)
2860{
2861 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2862 u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
2863 200 + vif->bss_conf.beacon_int);
2864 u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
2865 100 + vif->bss_conf.beacon_int);
2866
2867 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2868 return;
2869
Eliad Peller576eeee2014-07-01 18:38:38 +03002870 /*
2871 * iwl_mvm_protect_session() reads directly from the device
2872 * (the system time), so make sure it is available.
2873 */
2874 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2875 return;
2876
Johannes Berg8ca151b2013-01-24 14:25:36 +01002877 mutex_lock(&mvm->mutex);
2878 /* Try really hard to protect the session and hear a beacon */
Liad Kaufmand20d37b2014-07-06 17:14:39 +03002879 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002880 mutex_unlock(&mvm->mutex);
Eliad Peller576eeee2014-07-01 18:38:38 +03002881
2882 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002883}
2884
David Spinadel35a000b2013-08-28 09:29:43 +03002885static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2886 struct ieee80211_vif *vif,
2887 struct cfg80211_sched_scan_request *req,
David Spinadel633e2712014-02-06 16:15:23 +02002888 struct ieee80211_scan_ies *ies)
David Spinadel35a000b2013-08-28 09:29:43 +03002889{
2890 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Luciano Coelho6749dd82015-03-20 15:51:36 +02002891
David Spinadel35a000b2013-08-28 09:29:43 +03002892 int ret;
2893
2894 mutex_lock(&mvm->mutex);
2895
Luciano Coelho1f940382015-02-10 13:03:38 +02002896 if (!vif->bss_conf.idle) {
David Spinadelbd5e4742014-04-24 13:15:29 +03002897 ret = -EBUSY;
2898 goto out;
2899 }
2900
Luciano Coelho19945df2015-03-20 16:11:28 +02002901 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
David Spinadeld2496222014-05-20 12:46:37 +03002902
David Spinadel35a000b2013-08-28 09:29:43 +03002903out:
2904 mutex_unlock(&mvm->mutex);
2905 return ret;
2906}
2907
Johannes Berg37e33082014-02-17 10:48:17 +01002908static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2909 struct ieee80211_vif *vif)
David Spinadel35a000b2013-08-28 09:29:43 +03002910{
2911 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02002912 int ret;
David Spinadel35a000b2013-08-28 09:29:43 +03002913
2914 mutex_lock(&mvm->mutex);
Luciano Coelhoe7d3aba2015-02-06 10:19:05 +02002915
2916 /* Due to a race condition, it's possible that mac80211 asks
2917 * us to stop a sched_scan when it's already stopped. This
2918 * can happen, for instance, if we stopped the scan ourselves,
2919 * called ieee80211_sched_scan_stopped() and the userspace called
2920	 * stop sched scan before ieee80211_sched_scan_stopped_work()
2921 * could run. To handle this, simply return if the scan is
2922 * not running.
2923 */
Luciano Coelho262888f2015-05-07 17:21:09 +03002924 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
Luciano Coelhoe7d3aba2015-02-06 10:19:05 +02002925 mutex_unlock(&mvm->mutex);
2926 return 0;
2927 }
2928
Luciano Coelhoc7d42482015-05-07 16:00:26 +03002929 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
David Spinadel35a000b2013-08-28 09:29:43 +03002930 mutex_unlock(&mvm->mutex);
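	/* flush the async handlers so the scan-stopped notification is
	 * processed before we report back to mac80211
	 */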
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02002931 iwl_mvm_wait_for_async_handlers(mvm);
Johannes Berg37e33082014-02-17 10:48:17 +01002932
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02002933 return ret;
David Spinadel35a000b2013-08-28 09:29:43 +03002934}
2935
Johannes Berg8ca151b2013-01-24 14:25:36 +01002936static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2937 enum set_key_cmd cmd,
2938 struct ieee80211_vif *vif,
2939 struct ieee80211_sta *sta,
2940 struct ieee80211_key_conf *key)
2941{
2942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2943 int ret;
2944
2945 if (iwlwifi_mod_params.sw_crypto) {
2946 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2947 return -EOPNOTSUPP;
2948 }
2949
2950 switch (key->cipher) {
2951 case WLAN_CIPHER_SUITE_TKIP:
2952 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002953 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2954 break;
Johannes Bergca8c0f42015-04-20 17:54:54 +02002955 case WLAN_CIPHER_SUITE_CCMP:
2956 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2957 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002958 case WLAN_CIPHER_SUITE_AES_CMAC:
Johannes Berg30686bf2015-06-02 21:39:54 +02002959 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
Johannes Berg8ca151b2013-01-24 14:25:36 +01002960 break;
2961 case WLAN_CIPHER_SUITE_WEP40:
2962 case WLAN_CIPHER_SUITE_WEP104:
Johannes Bergba3943b2014-11-12 23:54:48 +01002963 /* For non-client mode, only use WEP keys for TX as we probably
2964 * don't have a station yet anyway and would then have to keep
2965 * track of the keys, linking them to each of the clients/peers
2966 * as they appear. For now, don't do that, for performance WEP
2967 * offload doesn't really matter much, but we need it for some
2968 * other offload features in client mode.
Johannes Berg8ca151b2013-01-24 14:25:36 +01002969 */
Johannes Bergba3943b2014-11-12 23:54:48 +01002970 if (vif->type != NL80211_IFTYPE_STATION)
2971 return 0;
2972 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002973 default:
Max Stepanove36e5432013-08-27 19:56:13 +03002974 /* currently FW supports only one optional cipher scheme */
2975 if (hw->n_cipher_schemes &&
2976 hw->cipher_schemes->cipher == key->cipher)
2977 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2978 else
2979 return -EOPNOTSUPP;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002980 }
2981
2982 mutex_lock(&mvm->mutex);
2983
2984 switch (cmd) {
2985 case SET_KEY:
Johannes Berg5023d962013-07-31 14:07:43 +02002986 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2987 vif->type == NL80211_IFTYPE_AP) && !sta) {
2988 /*
2989 * GTK on AP interface is a TX-only key, return 0;
2990 * on IBSS they're per-station and because we're lazy
2991 * we don't support them for RX, so do the same.
2992 */
Johannes Berg6caffd42013-03-06 13:15:21 +01002993 ret = 0;
2994 key->hw_key_idx = STA_KEY_IDX_INVALID;
2995 break;
2996 }
2997
Johannes Bergb546dcd2015-04-20 17:31:10 +02002998 /* During FW restart, in order to restore the state as it was,
2999 * don't try to reprogram keys we previously failed for.
3000 */
3001 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3002 key->hw_key_idx == STA_KEY_IDX_INVALID) {
3003 IWL_DEBUG_MAC80211(mvm,
3004 "skip invalid idx key programming during restart\n");
3005 ret = 0;
3006 break;
3007 }
3008
Johannes Berg8ca151b2013-01-24 14:25:36 +01003009 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
Johannes Bergb546dcd2015-04-20 17:31:10 +02003010 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
3011 test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
3012 &mvm->status));
Johannes Berg8ca151b2013-01-24 14:25:36 +01003013 if (ret) {
3014 IWL_WARN(mvm, "set key failed\n");
3015 /*
3016 * can't add key for RX, but we don't need it
3017 * in the device for TX so still return 0
3018 */
Johannes Berg6caffd42013-03-06 13:15:21 +01003019 key->hw_key_idx = STA_KEY_IDX_INVALID;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003020 ret = 0;
3021 }
3022
3023 break;
3024 case DISABLE_KEY:
Johannes Berg6caffd42013-03-06 13:15:21 +01003025 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
3026 ret = 0;
3027 break;
3028 }
3029
Johannes Berg8ca151b2013-01-24 14:25:36 +01003030 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
3031 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
3032 break;
3033 default:
3034 ret = -EINVAL;
3035 }
3036
3037 mutex_unlock(&mvm->mutex);
3038 return ret;
3039}
3040
3041static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3042 struct ieee80211_vif *vif,
3043 struct ieee80211_key_conf *keyconf,
3044 struct ieee80211_sta *sta,
3045 u32 iv32, u16 *phase1key)
3046{
3047 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3048
Johannes Berg5023d962013-07-31 14:07:43 +02003049 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3050 return;
3051
Johannes Berg8ca151b2013-01-24 14:25:36 +01003052 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3053}
3054
3055
Ariej Marjiehb1128892014-07-16 21:11:12 +03003056static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3057 struct iwl_rx_packet *pkt, void *data)
3058{
3059 struct iwl_mvm *mvm =
3060 container_of(notif_wait, struct iwl_mvm, notif_wait);
3061 struct iwl_hs20_roc_res *resp;
3062 int resp_len = iwl_rx_packet_payload_len(pkt);
3063 struct iwl_mvm_time_event_data *te_data = data;
3064
3065 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3066 return true;
3067
3068 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3069 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3070 return true;
3071 }
3072
3073 resp = (void *)pkt->data;
3074
3075 IWL_DEBUG_TE(mvm,
3076		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
3077 resp->status, resp->event_unique_id);
3078
3079 te_data->uid = le32_to_cpu(resp->event_unique_id);
3080 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3081 te_data->uid);
3082
3083 spin_lock_bh(&mvm->time_event_lock);
3084 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3085 spin_unlock_bh(&mvm->time_event_lock);
3086
3087 return true;
3088}
3089
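/* max time (in ms) the fw may delay the start of the aux ROC time event */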
Matti Gottlieb35d3dab2015-03-29 13:38:16 +03003090#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
Ariej Marjiehb1128892014-07-16 21:11:12 +03003091static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3092 struct ieee80211_channel *channel,
3093 struct ieee80211_vif *vif,
3094 int duration)
3095{
3096 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3097 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3098 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
Sara Sharon6eb031d2015-07-13 14:50:47 +03003099 static const u16 time_event_response[] = { HOT_SPOT_CMD };
Ariej Marjiehb1128892014-07-16 21:11:12 +03003100 struct iwl_notification_wait wait_time_event;
3101 struct iwl_hs20_roc_req aux_roc_req = {
3102 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3103 .id_and_color =
3104 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3105 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3106 /* Set the channel info data */
3107 .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
3108 PHY_BAND_24 : PHY_BAND_5,
3109 .channel_info.channel = channel->hw_value,
3110 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
3111 /* Set the time and duration */
3112 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3113 .apply_time_max_delay =
3114 cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
3115 .duration = cpu_to_le32(MSEC_TO_TU(duration)),
3116 };
3117
3118 /* Set the node address */
3119 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3120
Matti Gottlieba6cc5162014-09-29 11:46:04 +03003121 lockdep_assert_held(&mvm->mutex);
3122
3123 spin_lock_bh(&mvm->time_event_lock);
3124
3125 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3126 spin_unlock_bh(&mvm->time_event_lock);
3127 return -EIO;
3128 }
3129
Ariej Marjiehb1128892014-07-16 21:11:12 +03003130 te_data->vif = vif;
3131 te_data->duration = duration;
3132 te_data->id = HOT_SPOT_CMD;
3133
Ariej Marjiehb1128892014-07-16 21:11:12 +03003134 spin_unlock_bh(&mvm->time_event_lock);
3135
3136 /*
3137 * Use a notification wait, which really just processes the
3138 * command response and doesn't wait for anything, in order
3139 * to be able to process the response and get the UID inside
3140 * the RX path. Using CMD_WANT_SKB doesn't work because it
3141 * stores the buffer and then wakes up this thread, by which
3142 * time another notification (that the time event started)
3143 * might already be processed unsuccessfully.
3144 */
3145 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3146 time_event_response,
3147 ARRAY_SIZE(time_event_response),
3148 iwl_mvm_rx_aux_roc, te_data);
3149
3150 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3151 &aux_roc_req);
3152
3153 if (res) {
3154 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3155 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3156 goto out_clear_te;
3157 }
3158
3159 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3160 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3161 /* should never fail */
3162 WARN_ON_ONCE(res);
3163
3164 if (res) {
3165 out_clear_te:
3166 spin_lock_bh(&mvm->time_event_lock);
3167 iwl_mvm_te_clear_data(mvm, te_data);
3168 spin_unlock_bh(&mvm->time_event_lock);
3169 }
3170
3171 return res;
3172}
3173
Johannes Berg8ca151b2013-01-24 14:25:36 +01003174static int iwl_mvm_roc(struct ieee80211_hw *hw,
3175 struct ieee80211_vif *vif,
3176 struct ieee80211_channel *channel,
Ilan Peerd339d5c2013-02-12 09:34:13 +02003177 int duration,
3178 enum ieee80211_roc_type type)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003179{
3180 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003181 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003182 struct cfg80211_chan_def chandef;
Ilan Peer31d385a2013-04-02 10:25:46 +03003183 struct iwl_mvm_phy_ctxt *phy_ctxt;
3184 int ret, i;
3185
3186 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3187 duration, type);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003188
Matti Gottlieb6ed13162015-03-30 16:50:07 +03003189 flush_work(&mvm->roc_done_wk);
3190
Matti Gottlieba6cc5162014-09-29 11:46:04 +03003191 mutex_lock(&mvm->mutex);
3192
Ariej Marjiehb1128892014-07-16 21:11:12 +03003193 switch (vif->type) {
3194 case NL80211_IFTYPE_STATION:
Johannes Berg859d9142015-06-01 17:11:11 +02003195 if (fw_has_capa(&mvm->fw->ucode_capa,
3196 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
Luciano Coelho5ac6c722014-10-21 16:12:18 +03003197 /* Use aux roc framework (HS20) */
3198 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3199 vif, duration);
3200 goto out_unlock;
3201 }
3202 IWL_ERR(mvm, "hotspot not supported\n");
3203 ret = -EINVAL;
Matti Gottlieba6cc5162014-09-29 11:46:04 +03003204 goto out_unlock;
Ariej Marjiehb1128892014-07-16 21:11:12 +03003205 case NL80211_IFTYPE_P2P_DEVICE:
3206 /* handle below */
3207 break;
3208 default:
3209 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
Matti Gottlieba6cc5162014-09-29 11:46:04 +03003210 ret = -EINVAL;
3211 goto out_unlock;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003212 }
3213
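	/* First see if another PHY context is already tuned to the requested
	 * channel; if so, rebind the P2P Device to that context instead of
	 * reconfiguring the one it currently uses.
	 */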
Ilan Peer31d385a2013-04-02 10:25:46 +03003214 for (i = 0; i < NUM_PHY_CTX; i++) {
3215 phy_ctxt = &mvm->phy_ctxts[i];
3216 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3217 continue;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003218
Ilan Peer31d385a2013-04-02 10:25:46 +03003219 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3220 /*
3221 * Unbind the P2P_DEVICE from the current PHY context,
3222 * and if the PHY context is not used remove it.
3223 */
3224 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3225 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3226 goto out_unlock;
3227
3228 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3229
3230 /* Bind the P2P_DEVICE to the current PHY Context */
3231 mvmvif->phy_ctxt = phy_ctxt;
3232
3233 ret = iwl_mvm_binding_add_vif(mvm, vif);
3234 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3235 goto out_unlock;
3236
3237 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3238 goto schedule_time_event;
3239 }
3240 }
3241
3242 /* Need to update the PHY context only if the ROC channel changed */
3243 if (channel == mvmvif->phy_ctxt->channel)
3244 goto schedule_time_event;
3245
3246 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3247
3248 /*
3249 * Change the PHY context configuration as it is currently referenced
3250 * only by the P2P Device MAC
3251 */
3252 if (mvmvif->phy_ctxt->ref == 1) {
3253 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3254 &chandef, 1, 1);
3255 if (ret)
3256 goto out_unlock;
3257 } else {
3258 /*
3259 * The PHY context is shared with other MACs. Need to remove the
3260		 * P2P Device from the binding, allocate a new PHY context and
3261 * create a new binding
3262 */
3263 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3264 if (!phy_ctxt) {
3265 ret = -ENOSPC;
3266 goto out_unlock;
3267 }
3268
3269 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3270 1, 1);
3271 if (ret) {
3272 IWL_ERR(mvm, "Failed to change PHY context\n");
3273 goto out_unlock;
3274 }
3275
3276 /* Unbind the P2P_DEVICE from the current PHY context */
3277 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3278 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3279 goto out_unlock;
3280
3281 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3282
3283 /* Bind the P2P_DEVICE to the new allocated PHY context */
3284 mvmvif->phy_ctxt = phy_ctxt;
3285
3286 ret = iwl_mvm_binding_add_vif(mvm, vif);
3287 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3288 goto out_unlock;
3289
3290 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3291 }
3292
3293schedule_time_event:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003294 /* Schedule the time events */
Ilan Peere635c792013-02-13 11:05:18 +02003295 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003296
Ilan Peer31d385a2013-04-02 10:25:46 +03003297out_unlock:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003298 mutex_unlock(&mvm->mutex);
3299 IWL_DEBUG_MAC80211(mvm, "leave\n");
Johannes Berg8ca151b2013-01-24 14:25:36 +01003300 return ret;
3301}
3302
3303static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3304{
3305 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3306
3307 IWL_DEBUG_MAC80211(mvm, "enter\n");
3308
3309 mutex_lock(&mvm->mutex);
Matti Gottliebbf5da872014-11-16 10:25:12 +02003310 iwl_mvm_stop_roc(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003311 mutex_unlock(&mvm->mutex);
3312
3313 IWL_DEBUG_MAC80211(mvm, "leave\n");
3314 return 0;
3315}
3316
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003317static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3318 struct ieee80211_chanctx_conf *ctx)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003319{
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003320 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3321 struct iwl_mvm_phy_ctxt *phy_ctxt;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003322 int ret;
3323
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003324 lockdep_assert_held(&mvm->mutex);
3325
Ilan Peer53a9d612013-04-28 11:55:08 +03003326 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003327
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003328 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3329 if (!phy_ctxt) {
3330 ret = -ENOSPC;
3331 goto out;
3332 }
3333
Eliad Pellerdcbc3e12013-10-31 14:31:25 +02003334 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
Ilan Peer53a9d612013-04-28 11:55:08 +03003335 ctx->rx_chains_static,
3336 ctx->rx_chains_dynamic);
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003337 if (ret) {
3338 IWL_ERR(mvm, "Failed to add PHY context\n");
3339 goto out;
3340 }
3341
Ilan Peer53a9d612013-04-28 11:55:08 +03003342 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003343 *phy_ctxt_id = phy_ctxt->id;
3344out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003345 return ret;
3346}
3347
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003348static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3349 struct ieee80211_chanctx_conf *ctx)
3350{
3351 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3352 int ret;
3353
3354 mutex_lock(&mvm->mutex);
3355 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3356 mutex_unlock(&mvm->mutex);
3357
3358 return ret;
3359}
3360
3361static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3362 struct ieee80211_chanctx_conf *ctx)
3363{
3364 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3365 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3366
3367 lockdep_assert_held(&mvm->mutex);
3368
3369 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3370}
3371
Johannes Berg8ca151b2013-01-24 14:25:36 +01003372static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3373 struct ieee80211_chanctx_conf *ctx)
3374{
3375 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003376
3377 mutex_lock(&mvm->mutex);
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003378 __iwl_mvm_remove_chanctx(mvm, ctx);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003379 mutex_unlock(&mvm->mutex);
3380}
3381
3382static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3383 struct ieee80211_chanctx_conf *ctx,
3384 u32 changed)
3385{
3386 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003387 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3388 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
Johannes Berg8ca151b2013-01-24 14:25:36 +01003389
Ilan Peer31d385a2013-04-02 10:25:46 +03003390 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3391 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3392 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
Arik Nemtsov2dceeda2013-12-29 17:57:53 +02003393 IEEE80211_CHANCTX_CHANGE_RADAR |
3394 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
Ilan Peer31d385a2013-04-02 10:25:46 +03003395 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3396 phy_ctxt->ref, changed))
3397 return;
3398
Johannes Berg8ca151b2013-01-24 14:25:36 +01003399 mutex_lock(&mvm->mutex);
Emmanuel Grumbach4d664492014-04-30 18:09:59 +03003400 iwl_mvm_bt_coex_vif_change(mvm);
Eliad Pellerdcbc3e12013-10-31 14:31:25 +02003401 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003402 ctx->rx_chains_static,
3403 ctx->rx_chains_dynamic);
3404 mutex_unlock(&mvm->mutex);
3405}
3406
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003407static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3408 struct ieee80211_vif *vif,
Luciano Coelhof0c97782014-03-05 15:41:48 +02003409 struct ieee80211_chanctx_conf *ctx,
3410 bool switching_chanctx)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003411{
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003412 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3413 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
Johannes Berg8ca151b2013-01-24 14:25:36 +01003414 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3415 int ret;
3416
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003417 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003418
Ilan Peerfe0f2de2013-03-21 10:23:52 +02003419 mvmvif->phy_ctxt = phy_ctxt;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003420
3421 switch (vif->type) {
3422 case NL80211_IFTYPE_AP:
Luciano Coelho4741dd02014-11-10 11:10:13 +02003423 /* only needed if we're switching chanctx (i.e. during CSA) */
3424 if (switching_chanctx) {
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003425 mvmvif->ap_ibss_active = true;
3426 break;
3427 }
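		/* fall through for the non-CSA case - handled like IBSS below */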
Johannes Berg5023d962013-07-31 14:07:43 +02003428 case NL80211_IFTYPE_ADHOC:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003429 /*
3430 * The AP binding flow is handled as part of the start_ap flow
Johannes Berg5023d962013-07-31 14:07:43 +02003431 * (in bss_info_changed), similarly for IBSS.
Johannes Berg8ca151b2013-01-24 14:25:36 +01003432 */
3433 ret = 0;
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003434 goto out;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003435 case NL80211_IFTYPE_STATION:
Luciano Coelho2533edc2014-08-08 19:50:46 +03003436 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003437 case NL80211_IFTYPE_MONITOR:
Luciano Coelho2533edc2014-08-08 19:50:46 +03003438 /* always disable PS when a monitor interface is active */
3439 mvmvif->ps_disabled = true;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003440 break;
3441 default:
3442 ret = -EINVAL;
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003443 goto out;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003444 }
3445
3446 ret = iwl_mvm_binding_add_vif(mvm, vif);
3447 if (ret)
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003448 goto out;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003449
3450 /*
Alexander Bondar92d85562013-10-23 11:50:34 +02003451 * Power state must be updated before quotas,
3452 * otherwise fw will complain.
3453 */
Arik Nemtsov999609f2014-05-15 17:31:51 +03003454 iwl_mvm_power_update_mac(mvm);
Alexander Bondar92d85562013-10-23 11:50:34 +02003455
3456 /* Setting the quota at this stage is only required for monitor
Johannes Berg8ca151b2013-01-24 14:25:36 +01003457 * interfaces. For the other types, the bss_info changed flow
3458 * will handle quota settings.
3459 */
3460 if (vif->type == NL80211_IFTYPE_MONITOR) {
Ilan Peer1e1391c2013-03-13 14:52:04 +02003461 mvmvif->monitor_active = true;
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02003462 ret = iwl_mvm_update_quotas(mvm, false, NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003463 if (ret)
3464 goto out_remove_binding;
3465 }
3466
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003467 /* Handle binding during CSA */
Luciano Coelhoa57c6882014-11-10 11:10:16 +02003468 if (vif->type == NL80211_IFTYPE_AP) {
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02003469 iwl_mvm_update_quotas(mvm, false, NULL);
Johannes Berg3dfd3a92014-08-11 21:37:30 +02003470 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003471 }
3472
Luciano Coelho4741dd02014-11-10 11:10:13 +02003473 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
Luciano Coelho686e7fe2014-11-10 11:10:21 +02003474 u32 duration = 2 * vif->bss_conf.beacon_int;
3475
3476 /* iwl_mvm_protect_session() reads directly from the
3477 * device (the system time), so make sure it is
3478 * available.
3479 */
3480 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3481 if (ret)
3482 goto out_remove_binding;
3483
3484 /* Protect the session to make sure we hear the first
3485 * beacon on the new channel.
3486 */
3487 iwl_mvm_protect_session(mvm, vif, duration, duration,
3488 vif->bss_conf.beacon_int / 2,
3489 true);
3490
3491 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3492
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02003493 iwl_mvm_update_quotas(mvm, false, NULL);
Luciano Coelho0ce04ce2014-05-08 16:03:39 +03003494 }
3495
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003496 goto out;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003497
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003498out_remove_binding:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003499 iwl_mvm_binding_remove_vif(mvm, vif);
Arik Nemtsov999609f2014-05-15 17:31:51 +03003500 iwl_mvm_power_update_mac(mvm);
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003501out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003502 if (ret)
3503 mvmvif->phy_ctxt = NULL;
3504 return ret;
3505}
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003506static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3507 struct ieee80211_vif *vif,
3508 struct ieee80211_chanctx_conf *ctx)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003509{
3510 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003511 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003512
3513 mutex_lock(&mvm->mutex);
Luciano Coelhof0c97782014-03-05 15:41:48 +02003514 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003515 mutex_unlock(&mvm->mutex);
3516
3517 return ret;
3518}
3519
3520static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3521 struct ieee80211_vif *vif,
Luciano Coelhof0c97782014-03-05 15:41:48 +02003522 struct ieee80211_chanctx_conf *ctx,
3523 bool switching_chanctx)
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003524{
3525 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Luciano Coelhof0c97782014-03-05 15:41:48 +02003526 struct ieee80211_vif *disabled_vif = NULL;
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003527
3528 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003529
3530 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3531
Johannes Berg8ca151b2013-01-24 14:25:36 +01003532 switch (vif->type) {
Johannes Berg5023d962013-07-31 14:07:43 +02003533 case NL80211_IFTYPE_ADHOC:
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003534 goto out;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003535 case NL80211_IFTYPE_MONITOR:
Ilan Peer1e1391c2013-03-13 14:52:04 +02003536 mvmvif->monitor_active = false;
Luciano Coelho2533edc2014-08-08 19:50:46 +03003537 mvmvif->ps_disabled = false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003538 break;
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003539 case NL80211_IFTYPE_AP:
3540 /* This part is triggered only during CSA */
Luciano Coelho4741dd02014-11-10 11:10:13 +02003541 if (!switching_chanctx || !mvmvif->ap_ibss_active)
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003542 goto out;
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003543
Andrei Otcheretianski7ef0aab2014-11-10 11:10:11 +02003544 mvmvif->csa_countdown = false;
3545
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003546 /* Set CS bit on all the stations */
3547 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3548
3549 /* Save blocked iface, the timeout is set on the next beacon */
3550 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3551
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003552 mvmvif->ap_ibss_active = false;
Luciano Coelhof0c97782014-03-05 15:41:48 +02003553 break;
3554 case NL80211_IFTYPE_STATION:
3555 if (!switching_chanctx)
3556 break;
3557
3558 disabled_vif = vif;
3559
Johannes Berg3dfd3a92014-08-11 21:37:30 +02003560 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
Luciano Coelhof0c97782014-03-05 15:41:48 +02003561 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003562 default:
3563 break;
3564 }
3565
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02003566 iwl_mvm_update_quotas(mvm, false, disabled_vif);
Ilan Peer1e1391c2013-03-13 14:52:04 +02003567 iwl_mvm_binding_remove_vif(mvm, vif);
Alexander Bondar1c2abf72013-08-27 20:31:48 +03003568
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003569out:
Ilan Peera11e1442013-12-31 21:19:55 +02003570 mvmvif->phy_ctxt = NULL;
Arik Nemtsov999609f2014-05-15 17:31:51 +03003571 iwl_mvm_power_update_mac(mvm);
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003572}
3573
3574static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3575 struct ieee80211_vif *vif,
3576 struct ieee80211_chanctx_conf *ctx)
3577{
3578 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3579
3580 mutex_lock(&mvm->mutex);
Luciano Coelhof0c97782014-03-05 15:41:48 +02003581 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003582 mutex_unlock(&mvm->mutex);
3583}
3584
Luciano Coelho50cc9572014-11-10 11:10:08 +02003585static int
3586iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3587 struct ieee80211_vif_chanctx_switch *vifs)
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003588{
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003589 int ret;
3590
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003591 mutex_lock(&mvm->mutex);
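	/* Swap in steps: unassign the vif from the old context, remove it,
	 * add the new context and assign the vif; on failure roll back to
	 * the old context, or restart the hw if even that fails.
	 */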
Luciano Coelhof0c97782014-03-05 15:41:48 +02003592 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003593 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3594
3595 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3596 if (ret) {
3597 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3598 goto out_reassign;
3599 }
3600
Luciano Coelhof0c97782014-03-05 15:41:48 +02003601 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3602 true);
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003603 if (ret) {
3604 IWL_ERR(mvm,
3605 "failed to assign new_ctx during channel switch\n");
3606 goto out_remove;
3607 }
3608
Arik Nemtsovf6972672014-06-15 16:03:55 +03003609 /* we don't support TDLS during DCM - can be caused by channel switch */
3610 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3611 iwl_mvm_teardown_tdls_peers(mvm);
3612
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003613 goto out;
3614
3615out_remove:
3616 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3617
3618out_reassign:
Luciano Coelho6fd1fb62014-11-10 11:10:10 +02003619 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003620 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3621 goto out_restart;
3622 }
3623
Luciano Coelho6fd1fb62014-11-10 11:10:10 +02003624 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3625 true)) {
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003626 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3627 goto out_restart;
3628 }
3629
3630 goto out;
3631
3632out_restart:
3633 /* things keep failing, better restart the hw */
3634 iwl_mvm_nic_restart(mvm, false);
3635
3636out:
3637 mutex_unlock(&mvm->mutex);
Luciano Coelho50cc9572014-11-10 11:10:08 +02003638
3639 return ret;
3640}
3641
Luciano Coelho48a256e2014-11-10 11:10:09 +02003642static int
3643iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3644 struct ieee80211_vif_chanctx_switch *vifs)
3645{
3646 int ret;
3647
3648 mutex_lock(&mvm->mutex);
3649 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3650
3651 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3652 true);
3653 if (ret) {
3654 IWL_ERR(mvm,
3655 "failed to assign new_ctx during channel switch\n");
3656 goto out_reassign;
3657 }
3658
3659 goto out;
3660
3661out_reassign:
3662 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3663 true)) {
3664 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3665 goto out_restart;
3666 }
3667
3668 goto out;
3669
3670out_restart:
3671 /* things keep failing, better restart the hw */
3672 iwl_mvm_nic_restart(mvm, false);
3673
3674out:
3675 mutex_unlock(&mvm->mutex);
3676
3677 return ret;
3678}
3679
Luciano Coelho50cc9572014-11-10 11:10:08 +02003680static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3681 struct ieee80211_vif_chanctx_switch *vifs,
3682 int n_vifs,
3683 enum ieee80211_chanctx_switch_mode mode)
3684{
3685 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3686 int ret;
3687
3688 /* we only support a single-vif right now */
3689 if (n_vifs > 1)
3690 return -EOPNOTSUPP;
3691
3692 switch (mode) {
3693 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3694 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3695 break;
3696 case CHANCTX_SWMODE_REASSIGN_VIF:
Luciano Coelho48a256e2014-11-10 11:10:09 +02003697 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
Luciano Coelho50cc9572014-11-10 11:10:08 +02003698 break;
3699 default:
3700 ret = -EOPNOTSUPP;
3701 break;
3702 }
3703
Luciano Coelhob08c1d92014-05-20 23:31:05 +03003704 return ret;
3705}
3706
Johannes Berg8ca151b2013-01-24 14:25:36 +01003707static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3708 struct ieee80211_sta *sta,
3709 bool set)
3710{
3711 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01003712 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003713
3714 if (!mvm_sta || !mvm_sta->vif) {
3715 IWL_ERR(mvm, "Station is not associated to a vif\n");
3716 return -EINVAL;
3717 }
3718
3719 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3720}
3721
David Spinadel507cadf2013-07-31 18:07:21 +03003722#ifdef CONFIG_NL80211_TESTMODE
3723static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3724 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3725 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
Johannes Bergf6c6ad42013-08-01 14:17:15 +02003726 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
David Spinadel507cadf2013-07-31 18:07:21 +03003727};
3728
3729static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3730 struct ieee80211_vif *vif,
3731 void *data, int len)
3732{
3733 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3734 int err;
3735 u32 noa_duration;
3736
3737 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3738 if (err)
3739 return err;
3740
3741 if (!tb[IWL_MVM_TM_ATTR_CMD])
3742 return -EINVAL;
3743
3744 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3745 case IWL_MVM_TM_CMD_SET_NOA:
3746 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3747 !vif->bss_conf.enable_beacon ||
3748 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3749 return -EINVAL;
3750
3751 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3752 if (noa_duration >= vif->bss_conf.beacon_int)
3753 return -EINVAL;
3754
3755 mvm->noa_duration = noa_duration;
3756 mvm->noa_vif = vif;
3757
Emmanuel Grumbach7754ae72015-02-26 15:14:35 +02003758 return iwl_mvm_update_quotas(mvm, false, NULL);
Johannes Bergf6c6ad42013-08-01 14:17:15 +02003759 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3760 /* must be associated client vif - ignore authorized */
3761 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3762 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3763 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3764 return -EINVAL;
3765
3766 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003767 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3768 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
David Spinadel507cadf2013-07-31 18:07:21 +03003769 }
3770
3771 return -EOPNOTSUPP;
3772}
3773
3774static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3775 struct ieee80211_vif *vif,
3776 void *data, int len)
3777{
3778 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3779 int err;
3780
3781 mutex_lock(&mvm->mutex);
3782 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3783 mutex_unlock(&mvm->mutex);
3784
3785 return err;
3786}
3787#endif
3788
Luciano Coelho622e3f92014-11-10 11:10:17 +02003789static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3790 struct ieee80211_vif *vif,
3791 struct ieee80211_channel_switch *chsw)
3792{
3793 /* By implementing this operation, we prevent mac80211 from
3794 * starting its own channel switch timer, so that we can call
3795 * ieee80211_chswitch_done() ourselves at the right time
3796 * (which is when the absence time event starts).
3797 */
3798
3799 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3800 "dummy channel switch op\n");
3801}
3802
Luciano Coelhof0289052014-11-10 11:10:06 +02003803static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3804 struct ieee80211_vif *vif,
3805 struct ieee80211_channel_switch *chsw)
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003806{
3807 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
Andrei Otcheretianski664322f2014-06-05 16:40:36 +03003808 struct ieee80211_vif *csa_vif;
Luciano Coelhof6c34822014-11-10 11:10:12 +02003809 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02003810 u32 apply_time;
Luciano Coelhof0289052014-11-10 11:10:06 +02003811 int ret;
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003812
3813 mutex_lock(&mvm->mutex);
Andrei Otcheretianski664322f2014-06-05 16:40:36 +03003814
Johannes Berg81d62d52015-03-10 14:44:00 +01003815 mvmvif->csa_failed = false;
3816
Luciano Coelho6b20d772014-11-10 11:10:07 +02003817 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
Luciano Coelhof0289052014-11-10 11:10:06 +02003818 chsw->chandef.center_freq1);
Luciano Coelho6b20d772014-11-10 11:10:07 +02003819
Johannes Berg21023b12015-03-31 08:58:16 +02003820 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
Emmanuel Grumbachf35d9c52015-02-10 10:49:51 +02003821
Luciano Coelho6b20d772014-11-10 11:10:07 +02003822 switch (vif->type) {
3823 case NL80211_IFTYPE_AP:
3824 csa_vif =
3825 rcu_dereference_protected(mvm->csa_vif,
3826 lockdep_is_held(&mvm->mutex));
3827 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3828 "Another CSA is already in progress")) {
3829 ret = -EBUSY;
3830 goto out_unlock;
3831 }
3832
3833 rcu_assign_pointer(mvm->csa_vif, vif);
Andrei Otcheretianski7ef0aab2014-11-10 11:10:11 +02003834
Andrei Otcheretianski7ef0aab2014-11-10 11:10:11 +02003835 if (WARN_ONCE(mvmvif->csa_countdown,
3836 "Previous CSA countdown didn't complete")) {
3837 ret = -EBUSY;
3838 goto out_unlock;
3839 }
3840
Luciano Coelho6b20d772014-11-10 11:10:07 +02003841 break;
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02003842 case NL80211_IFTYPE_STATION:
Luciano Coelho4500e132014-11-10 11:10:15 +02003843 /* Schedule the time event to a bit before beacon 1,
3844 * to make sure we're in the new channel when the
3845 * GO/AP arrives.
3846 */
3847 apply_time = chsw->device_timestamp +
3848 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3849 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02003850
3851 if (chsw->block_tx)
3852 iwl_mvm_csa_client_absent(mvm, vif);
3853
Luciano Coelho4500e132014-11-10 11:10:15 +02003854 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02003855 apply_time);
Luciano Coelhoc6e0a3e02014-11-10 11:10:18 +02003856 if (mvmvif->bf_data.bf_enabled) {
3857 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3858 if (ret)
3859 goto out_unlock;
3860 }
3861
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02003862 break;
Luciano Coelho6b20d772014-11-10 11:10:07 +02003863 default:
3864 break;
3865 }
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003866
Luciano Coelhof6c34822014-11-10 11:10:12 +02003867 mvmvif->ps_disabled = true;
3868
3869 ret = iwl_mvm_power_update_ps(mvm);
3870 if (ret)
3871 goto out_unlock;
Luciano Coelhof0289052014-11-10 11:10:06 +02003872
Arik Nemtsove198f5e2014-09-14 19:13:54 +03003873 /* we won't be on this channel any longer */
3874 iwl_mvm_teardown_tdls_peers(mvm);
3875
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003876out_unlock:
3877 mutex_unlock(&mvm->mutex);
Luciano Coelhof0289052014-11-10 11:10:06 +02003878
3879 return ret;
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02003880}
3881
Luciano Coelhof6c34822014-11-10 11:10:12 +02003882static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3883 struct ieee80211_vif *vif)
3884{
3885 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3886 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3887 int ret;
3888
3889 mutex_lock(&mvm->mutex);
3890
Johannes Berg81d62d52015-03-10 14:44:00 +01003891 if (mvmvif->csa_failed) {
3892 mvmvif->csa_failed = false;
3893 ret = -EIO;
3894 goto out_unlock;
3895 }
3896
Luciano Coelhoa57c6882014-11-10 11:10:16 +02003897 if (vif->type == NL80211_IFTYPE_STATION) {
3898 struct iwl_mvm_sta *mvmsta;
3899
3900 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3901 mvmvif->ap_sta_id);
3902
3903 if (WARN_ON(!mvmsta)) {
3904 ret = -EIO;
3905 goto out_unlock;
3906 }
3907
3908 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3909
3910 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
Luciano Coelhoc6e0a3e02014-11-10 11:10:18 +02003911
3912 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3913 if (ret)
3914 goto out_unlock;
Luciano Coelho686e7fe2014-11-10 11:10:21 +02003915
3916 iwl_mvm_stop_session_protection(mvm, vif);
Luciano Coelhoa57c6882014-11-10 11:10:16 +02003917 }
3918
Luciano Coelhof6c34822014-11-10 11:10:12 +02003919 mvmvif->ps_disabled = false;
3920
3921 ret = iwl_mvm_power_update_ps(mvm);
3922
Luciano Coelhoa57c6882014-11-10 11:10:16 +02003923out_unlock:
Luciano Coelhof6c34822014-11-10 11:10:12 +02003924 mutex_unlock(&mvm->mutex);
3925
3926 return ret;
3927}
3928
Emmanuel Grumbachc5b0e7c2014-03-24 12:08:53 +02003929static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3930 struct ieee80211_vif *vif, u32 queues, bool drop)
3931{
3932 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3933 struct iwl_mvm_vif *mvmvif;
3934 struct iwl_mvm_sta *mvmsta;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03003935 struct ieee80211_sta *sta;
3936 int i;
3937 u32 msk = 0;
Emmanuel Grumbachc5b0e7c2014-03-24 12:08:53 +02003938
3939 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3940 return;
3941
3942 mutex_lock(&mvm->mutex);
3943 mvmvif = iwl_mvm_vif_from_mac80211(vif);
Emmanuel Grumbachc5b0e7c2014-03-24 12:08:53 +02003944
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03003945 /* flush the AP-station and all TDLS peers */
3946 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3947 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3948 lockdep_is_held(&mvm->mutex));
3949 if (IS_ERR_OR_NULL(sta))
3950 continue;
3951
3952 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3953 if (mvmsta->vif != vif)
3954 continue;
3955
3956 /* make sure only TDLS peers or the AP are flushed */
3957 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3958
3959 msk |= mvmsta->tfd_queue_msk;
Johannes Berg480acbc2014-10-10 08:59:27 +02003960 }
Emmanuel Grumbachc5b0e7c2014-03-24 12:08:53 +02003961
Emmanuel Grumbach6d440b22015-01-20 09:25:19 +02003962 if (drop) {
Luca Coelho5888a402015-10-06 09:54:57 +03003963 if (iwl_mvm_flush_tx_path(mvm, msk, 0))
Emmanuel Grumbach6d440b22015-01-20 09:25:19 +02003964 IWL_ERR(mvm, "flush request fail\n");
3965 mutex_unlock(&mvm->mutex);
3966 } else {
3967 mutex_unlock(&mvm->mutex);
Johannes Berg480acbc2014-10-10 08:59:27 +02003968
Emmanuel Grumbach6d440b22015-01-20 09:25:19 +02003969 /* this can take a while, and we may need/want other operations
3970 * to succeed while doing this, so do it without the mutex held
3971 */
3972 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3973 }
Emmanuel Grumbachc5b0e7c2014-03-24 12:08:53 +02003974}
3975
Johannes Berg91a8bcd2015-01-14 18:12:41 +01003976static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3977 struct survey_info *survey)
3978{
3979 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3980 int ret;
3981
3982 memset(survey, 0, sizeof(*survey));
3983
3984 /* only support global statistics right now */
3985 if (idx != 0)
3986 return -ENOENT;
3987
Johannes Berg859d9142015-06-01 17:11:11 +02003988	if (!fw_has_capa(&mvm->fw->ucode_capa,
Johannes Berg91a8bcd2015-01-14 18:12:41 +01003989 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3990 return -ENOENT;
3991
3992 mutex_lock(&mvm->mutex);
3993
3994 if (mvm->ucode_loaded) {
Johannes Berg33cef922015-01-21 21:41:29 +01003995 ret = iwl_mvm_request_statistics(mvm, false);
Johannes Berg91a8bcd2015-01-14 18:12:41 +01003996 if (ret)
3997 goto out;
3998 }
3999
4000 survey->filled = SURVEY_INFO_TIME |
4001 SURVEY_INFO_TIME_RX |
4002 SURVEY_INFO_TIME_TX |
4003 SURVEY_INFO_TIME_SCAN;
4004 survey->time = mvm->accu_radio_stats.on_time_rf +
4005 mvm->radio_stats.on_time_rf;
4006 do_div(survey->time, USEC_PER_MSEC);
4007
4008 survey->time_rx = mvm->accu_radio_stats.rx_time +
4009 mvm->radio_stats.rx_time;
4010 do_div(survey->time_rx, USEC_PER_MSEC);
4011
4012 survey->time_tx = mvm->accu_radio_stats.tx_time +
4013 mvm->radio_stats.tx_time;
4014 do_div(survey->time_tx, USEC_PER_MSEC);
4015
4016 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4017 mvm->radio_stats.on_time_scan;
4018 do_div(survey->time_scan, USEC_PER_MSEC);
4019
Johannes Berg10a7c0282015-04-01 10:00:31 +02004020 ret = 0;
Johannes Berg91a8bcd2015-01-14 18:12:41 +01004021 out:
4022 mutex_unlock(&mvm->mutex);
4023 return ret;
4024}
4025
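/* sta_statistics adds beacon counters for the AP station: the number of
 * beacons received and, once the firmware has seen enough of them to average,
 * the average beacon signal.  This only applies when beacon filtering is
 * offloaded (IEEE80211_VIF_BEACON_FILTER); otherwise mac80211 counts beacons
 * itself.
 */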
Johannes Berg33cef922015-01-21 21:41:29 +01004026static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4027 struct ieee80211_vif *vif,
4028 struct ieee80211_sta *sta,
4029 struct station_info *sinfo)
4030{
4031 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4032 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4033 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4034
Johannes Berg859d9142015-06-01 17:11:11 +02004035	if (!fw_has_capa(&mvm->fw->ucode_capa,
4036 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
Johannes Berg33cef922015-01-21 21:41:29 +01004037 return;
4038
4039	/* if beacon filtering isn't on, mac80211 does it anyway */
4040 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4041 return;
4042
4043 if (!vif->bss_conf.assoc)
4044 return;
4045
4046 mutex_lock(&mvm->mutex);
4047
4048 if (mvmvif->ap_sta_id != mvmsta->sta_id)
4049 goto unlock;
4050
4051 if (iwl_mvm_request_statistics(mvm, false))
4052 goto unlock;
4053
4054 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4055 mvmvif->beacon_stats.accu_num_beacons;
4056 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
4057 if (mvmvif->beacon_stats.avg_signal) {
4058 /* firmware only reports a value after RXing a few beacons */
4059 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4060 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4061 }
4062 unlock:
4063 mutex_unlock(&mvm->mutex);
4064}
4065
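/* MLME debug trigger: CHECK_MLME_TRIGGER decrements the per-event occurrence
 * counter taken from the trigger TLV and, once it reaches zero (or was
 * configured as zero), collects a firmware debug dump tagged with a short
 * description of the assoc/auth/deauth event that fired it.
 */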
Emmanuel Grumbach42032632015-04-15 12:43:46 +03004066static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4067 struct ieee80211_vif *vif,
4068 const struct ieee80211_event *event)
Emmanuel Grumbachd42f5352015-02-10 14:29:48 +02004069{
Johannes Berg5d4f9292015-03-31 09:12:54 +02004070#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
Emmanuel Grumbachd42f5352015-02-10 14:29:48 +02004071 do { \
4072 if ((_cnt) && --(_cnt)) \
4073 break; \
Johannes Berg5d4f9292015-03-31 09:12:54 +02004074 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
Emmanuel Grumbachd42f5352015-02-10 14:29:48 +02004075 } while (0)
4076
Emmanuel Grumbachd42f5352015-02-10 14:29:48 +02004077 struct iwl_fw_dbg_trigger_tlv *trig;
4078 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
Emmanuel Grumbachd42f5352015-02-10 14:29:48 +02004079
4080 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4081 return;
4082
Emmanuel Grumbachd42f5352015-02-10 14:29:48 +02004083 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4084 trig_mlme = (void *)trig->data;
4085 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4086 return;
4087
Emmanuel Grumbachd42f5352015-02-10 14:29:48 +02004088 if (event->u.mlme.data == ASSOC_EVENT) {
4089 if (event->u.mlme.status == MLME_DENIED)
4090 CHECK_MLME_TRIGGER(mvm, trig, buf,
4091 trig_mlme->stop_assoc_denied,
4092 "DENIED ASSOC: reason %d",
4093 event->u.mlme.reason);
4094 else if (event->u.mlme.status == MLME_TIMEOUT)
4095 CHECK_MLME_TRIGGER(mvm, trig, buf,
4096 trig_mlme->stop_assoc_timeout,
4097 "ASSOC TIMEOUT");
4098 } else if (event->u.mlme.data == AUTH_EVENT) {
4099 if (event->u.mlme.status == MLME_DENIED)
4100 CHECK_MLME_TRIGGER(mvm, trig, buf,
4101 trig_mlme->stop_auth_denied,
4102 "DENIED AUTH: reason %d",
4103 event->u.mlme.reason);
4104 else if (event->u.mlme.status == MLME_TIMEOUT)
4105 CHECK_MLME_TRIGGER(mvm, trig, buf,
4106 trig_mlme->stop_auth_timeout,
4107 "AUTH TIMEOUT");
4108 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4109 CHECK_MLME_TRIGGER(mvm, trig, buf,
4110 trig_mlme->stop_rx_deauth,
4111 "DEAUTH RX %d", event->u.mlme.reason);
4112 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4113 CHECK_MLME_TRIGGER(mvm, trig, buf,
4114 trig_mlme->stop_tx_deauth,
4115 "DEAUTH TX %d", event->u.mlme.reason);
4116 }
4117#undef CHECK_MLME_TRIGGER
4118}
4119
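/* BAR-RX debug trigger: collect a firmware debug dump when a BlockAck request
 * is received on a TID that is enabled in the trigger's rx_bar bitmap,
 * recording the peer address, TID and SSN.
 */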
Emmanuel Grumbach42032632015-04-15 12:43:46 +03004120static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4121 struct ieee80211_vif *vif,
4122 const struct ieee80211_event *event)
4123{
4124 struct iwl_fw_dbg_trigger_tlv *trig;
4125 struct iwl_fw_dbg_trigger_ba *ba_trig;
4126
4127 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4128 return;
4129
4130 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4131 ba_trig = (void *)trig->data;
4132 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4133 return;
4134
4135 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4136 return;
4137
4138 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4139 "BAR received from %pM, tid %d, ssn %d",
4140 event->u.ba.sta->addr, event->u.ba.tid,
4141 event->u.ba.ssn);
4142}
4143
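/* Same scheme for the frame-timeout case: collect a dump when a frame from
 * the peer times out on a TID enabled in the trigger's frame_timeout bitmap.
 */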
4144static void
4145iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4146 struct ieee80211_vif *vif,
4147 const struct ieee80211_event *event)
4148{
4149 struct iwl_fw_dbg_trigger_tlv *trig;
4150 struct iwl_fw_dbg_trigger_ba *ba_trig;
4151
4152 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4153 return;
4154
4155 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4156 ba_trig = (void *)trig->data;
4157 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4158 return;
4159
4160 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4161 return;
4162
4163 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4164 "Frame from %pM timed out, tid %d",
4165 event->u.ba.sta->addr, event->u.ba.tid);
4166}
4167
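/* Dispatch mac80211 event callbacks to the trigger handlers above. */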
4168static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4169 struct ieee80211_vif *vif,
4170 const struct ieee80211_event *event)
4171{
4172 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4173
4174 switch (event->type) {
4175 case MLME_EVENT:
4176 iwl_mvm_event_mlme_callback(mvm, vif, event);
4177 break;
4178 case BAR_RX_EVENT:
4179 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4180 break;
4181 case BA_FRAME_TIMEOUT:
4182 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4183 break;
4184 default:
4185 break;
4186 }
4187}
4188
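/* The mac80211 ops table tying the driver callbacks above (and in the other
 * MVM files) to mac80211.  The suspend/resume and WoWLAN-related hooks are
 * only registered when CONFIG_PM_SLEEP is set and are implemented in d3.c.
 */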
Johannes Berge5209262014-01-20 23:38:59 +01004189const struct ieee80211_ops iwl_mvm_hw_ops = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01004190 .tx = iwl_mvm_mac_tx,
4191 .ampdu_action = iwl_mvm_mac_ampdu_action,
4192 .start = iwl_mvm_mac_start,
Eliad Pellercf2c92d2014-11-04 11:43:54 +02004193 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004194 .stop = iwl_mvm_mac_stop,
4195 .add_interface = iwl_mvm_mac_add_interface,
4196 .remove_interface = iwl_mvm_mac_remove_interface,
4197 .config = iwl_mvm_mac_config,
Eliad Pellere59647e2013-11-28 14:08:50 +02004198 .prepare_multicast = iwl_mvm_prepare_multicast,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004199 .configure_filter = iwl_mvm_configure_filter,
Andrei Otcheretianskieffd1922015-06-30 12:08:28 +03004200 .config_iface_filter = iwl_mvm_config_iface_filter,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004201 .bss_info_changed = iwl_mvm_bss_info_changed,
4202 .hw_scan = iwl_mvm_mac_hw_scan,
4203 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
Johannes Berg1ddbbb02013-12-04 22:39:17 +01004204 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004205 .sta_state = iwl_mvm_mac_sta_state,
4206 .sta_notify = iwl_mvm_mac_sta_notify,
4207 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
Johannes Berg3e56ead2013-02-15 22:23:18 +01004208 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004209 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
Lilach Edelstein1f3b0ff2013-10-06 13:03:32 +02004210 .sta_rc_update = iwl_mvm_sta_rc_update,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004211 .conf_tx = iwl_mvm_mac_conf_tx,
4212 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
Arik Nemtsov07ecd892014-05-20 18:16:42 +03004213 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
Emmanuel Grumbachc5b0e7c2014-03-24 12:08:53 +02004214 .flush = iwl_mvm_mac_flush,
David Spinadel35a000b2013-08-28 09:29:43 +03004215 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4216 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004217 .set_key = iwl_mvm_mac_set_key,
4218 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4219 .remain_on_channel = iwl_mvm_roc,
4220 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004221 .add_chanctx = iwl_mvm_add_chanctx,
4222 .remove_chanctx = iwl_mvm_remove_chanctx,
4223 .change_chanctx = iwl_mvm_change_chanctx,
4224 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4225 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
Luciano Coelhob08c1d92014-05-20 23:31:05 +03004226 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004227
Johannes Berg5023d962013-07-31 14:07:43 +02004228 .start_ap = iwl_mvm_start_ap_ibss,
4229 .stop_ap = iwl_mvm_stop_ap_ibss,
4230 .join_ibss = iwl_mvm_start_ap_ibss,
4231 .leave_ibss = iwl_mvm_stop_ap_ibss,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004232
4233 .set_tim = iwl_mvm_set_tim,
4234
Luciano Coelho622e3f92014-11-10 11:10:17 +02004235 .channel_switch = iwl_mvm_channel_switch,
Luciano Coelhof0289052014-11-10 11:10:06 +02004236 .pre_channel_switch = iwl_mvm_pre_channel_switch,
Luciano Coelhof6c34822014-11-10 11:10:12 +02004237 .post_channel_switch = iwl_mvm_post_channel_switch,
Andrei Otcheretianskibd3398e2013-10-22 05:01:12 +02004238
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03004239 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4240 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4241 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4242
Emmanuel Grumbachd42f5352015-02-10 14:29:48 +02004243 .event_callback = iwl_mvm_mac_event_callback,
4244
David Spinadel507cadf2013-07-31 18:07:21 +03004245 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4246
Johannes Berg8ca151b2013-01-24 14:25:36 +01004247#ifdef CONFIG_PM_SLEEP
4248	/* these handlers are implemented in d3.c */
4249 .suspend = iwl_mvm_suspend,
4250 .resume = iwl_mvm_resume,
4251 .set_wakeup = iwl_mvm_set_wakeup,
4252 .set_rekey_data = iwl_mvm_set_rekey_data,
4253#if IS_ENABLED(CONFIG_IPV6)
4254 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4255#endif
4256 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4257#endif
Johannes Berg91a8bcd2015-01-14 18:12:41 +01004258 .get_survey = iwl_mvm_mac_get_survey,
Johannes Berg33cef922015-01-21 21:41:29 +01004259 .sta_statistics = iwl_mvm_mac_sta_statistics,
Johannes Berg8ca151b2013-01-24 14:25:36 +01004260};