/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"

#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

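/*
 * Request a teardown towards every TDLS station currently in the firmware
 * station table. The actual teardown is driven through mac80211/userspace.
 * Must be called with mvm->mutex held.
 */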
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
					    NL80211_TDLS_TEARDOWN,
					    WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
					    GFP_KERNEL);
	}
}

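/*
 * Count the TDLS stations in the firmware station table, optionally limited
 * to the given vif. Must be called with mvm->mutex held.
 */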
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}

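/*
 * Send the current set of TDLS peers to the firmware with TDLS_CONFIG_CMD.
 * The response is only length/status checked; its payload is not used yet.
 */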
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
			pkt->hdr.flags);
		goto exit;
	}

	if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
		goto exit;

	/* we don't really care about the response at this point */

exit:
	iwl_free_resp(&cmd);
}

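/*
 * Recalculate TDLS-related state after a peer was added or removed: update
 * power management before the first peer joins and after the last one
 * leaves, and refresh the firmware TDLS configuration in either case.
 */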
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/* configure the FW with TDLS peer info */
	iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}

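/*
 * Protect the session for two DTIM intervals so the TDLS discovery response
 * can be received on the base channel.
 */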
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/*
	 * iwl_mvm_protect_session() reads directly from the device
	 * (the system time), so make sure it is available.
	 */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
		return;

	mutex_lock(&mvm->mutex);
	/* Protect the session to hear the TDLS setup response on the channel */
	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}

static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}

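/*
 * Handle the firmware's channel-switch notification: a failed switch sends
 * the state machine back to idle; a successful one marks the switch active
 * and re-arms the delayed work one DTIM interval later.
 */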
int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* the FW may fail the switch; a zero status means failure */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		goto out;
	}

	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		goto out;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);

out:
	return 0;
}

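/*
 * Check whether a requested channel-switch action is allowed given the
 * current state machine and the peer it targets. Returns 0 when allowed,
 * -EBUSY or -EINVAL otherwise.
 */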
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}

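/*
 * Build and send a TDLS_CHANNEL_SWITCH_CMD for the given peer, including the
 * switch timing, the target channel (the stored peer channel or the base
 * channel when none was passed in) and the channel-switch template frame.
 * The state machine is advanced on success and reset to idle on failure.
 */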
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
	cmd.timing.switch_time = cpu_to_le32(switch_time);
	cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef) {
		cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
			       PHY_BAND_24 : PHY_BAND_5);
		cmd.ci.channel = chandef->chan->hw_value;
		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
		cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
	}

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	cmd.timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);

	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
			   mvmsta->sta_id);

	hdr = (void *)skb->data;
	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(cmd.frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
				   sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}

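/*
 * Delayed work that runs when an active channel switch has finished or timed
 * out: go back to idle and, if a switch peer is still configured, issue the
 * next switch request towards it.
 */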
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			   msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}

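/*
 * mac80211 callback to start a TDLS channel switch. Only a single switching
 * peer is supported; its parameters and template frame are saved so the
 * delayed work can re-issue the switch later.
 */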
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

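/*
 * mac80211 callback to cancel a TDLS channel switch. If the phy may still be
 * off-channel with the cancelled peer, wait one DTIM interval for it to
 * return to the base channel before flushing the delayed work.
 */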
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

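/*
 * mac80211 callback for a received TDLS channel-switch request or response
 * frame. A failed response from the current peer resets the state machine;
 * otherwise the switch (or the return to the base channel) is passed on to
 * the firmware, with a DTIM-long timeout armed in case it doesn't complete.
 */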
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}