/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/* a TU (time unit) is 1024 usec; parenthesize the argument for safe expansion */
#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

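/*
 * Request a TDLS teardown from mac80211 for every TDLS peer currently
 * present in the FW station table. Must be called with mvm->mutex held.
 */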
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
					    NL80211_TDLS_TEARDOWN,
					    WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
					    GFP_KERNEL);
	}
}

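/*
 * Count the TDLS peers in the FW station table. If @vif is non-NULL, only
 * peers belonging to that vif are counted.
 */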
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}

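/*
 * Send TDLS_CONFIG_CMD to the FW with the current list of TDLS peers.
 * The response is only sanity-checked for its length and then discarded.
 */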
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;

	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

	/* we don't really care about the response at this point */

	iwl_free_resp(&cmd);
}

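/*
 * Reconfigure the FW TDLS state after a peer was added or removed. A power
 * update is sent before the TDLS config when the first peer joins, and
 * after it when the last peer leaves.
 */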
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/* configure the FW with TDLS peer info */
	iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}

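/*
 * mac80211 callback: protect the session for two DTIM intervals so the
 * TDLS setup response can be heard on the base channel.
 */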
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/*
	 * iwl_mvm_protect_session() reads directly from the device
	 * (the system time), so make sure it is available.
	 */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
		return;

	mutex_lock(&mvm->mutex);
	/* Protect the session to hear the TDLS setup response on the channel */
	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}

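/*
 * Move the TDLS channel-switch state machine to @state, recording the
 * request timestamp when a request is sent and clearing the current
 * switching peer when returning to idle.
 */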
static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp =
			iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}

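/*
 * Handle the FW TDLS channel-switch notification: on success mark the
 * switch as active and re-arm the switch work after one DTIM interval,
 * on failure return to the idle state.
 */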
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		return;
	}

	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}

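/*
 * Validate a requested channel-switch action against the current state
 * machine state and the currently switching peer. Returns 0 if the action
 * is allowed, -EBUSY or -EINVAL otherwise.
 */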
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}

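/*
 * Build and send TDLS_CHANNEL_SWITCH_CMD to the FW, including the
 * channel-switch frame template, the switch timing and (when known) the
 * target channel, and update the state machine accordingly.
 */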
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
	cmd.timing.switch_time = cpu_to_le32(switch_time);
	cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef) {
		cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
			       PHY_BAND_24 : PHY_BAND_5);
		cmd.ci.channel = chandef->chan->hw_value;
		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
		cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
	}

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	cmd.timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	hdr = (void *)skb->data;
	if (info->control.hw_key) {
		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
			rcu_read_unlock();
			ret = -EINVAL;
			goto out;
		}
		iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
	}

	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
			   mvmsta->sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(cmd.frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
				   sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}

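/*
 * Delayed work that re-sends a channel-switch request to the registered
 * peer once the previous switch has finished or timed out.
 */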
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}

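/*
 * mac80211 callback to start a TDLS channel switch with @sta. Only a
 * single switching peer is supported; the switch is re-attempted
 * periodically by the channel-switch delayed work.
 */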
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

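/*
 * mac80211 callback to stop channel switching with @sta. If a switch with
 * this peer is in progress, wait a DTIM interval for the phy to return to
 * the base channel before flushing the delayed work.
 */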
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

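/*
 * mac80211 callback for a received TDLS channel-switch request/response
 * frame: forward it to the FW and arm a timeout in case the switch does
 * not complete.
 */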
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
		1024 / 1000;
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
733}