/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mac.h"

#include <net/mac80211.h>
#include <linux/etherdevice.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"

/**********/
/* Crypto */
/**********/

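/* Build a WMI vdev install key command from a mac80211 key and send it to
 * the firmware. For DISABLE_KEY the cipher is forced to WMI_CIPHER_NONE so
 * the firmware clears the key entry instead of installing one. */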
static int ath10k_send_key(struct ath10k_vif *arvif,
			   struct ieee80211_key_conf *key,
			   enum set_key_cmd cmd,
			   const u8 *macaddr)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_vdev_install_key_arg arg = {
		.vdev_id = arvif->vdev_id,
		.key_idx = key->keyidx,
		.key_len = key->keylen,
		.key_data = key->key,
		.macaddr = macaddr,
	};

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
		arg.key_flags = WMI_KEY_PAIRWISE;
	else
		arg.key_flags = WMI_KEY_GROUP;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		arg.key_cipher = WMI_CIPHER_AES_CCM;
		if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
		else
			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		arg.key_cipher = WMI_CIPHER_TKIP;
		arg.key_txmic_len = 8;
		arg.key_rxmic_len = 8;
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		arg.key_cipher = WMI_CIPHER_WEP;
		/* AP/IBSS mode requires the self-key to be a group key;
		 * otherwise a pairwise key must be set */
		if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
			arg.key_flags = WMI_KEY_PAIRWISE;
		break;
	default:
		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
		return -EOPNOTSUPP;
	}

	if (cmd == DISABLE_KEY) {
		arg.key_cipher = WMI_CIPHER_NONE;
		arg.key_data = NULL;
	}

	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
}

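/* Send the key to the firmware and wait (up to 3 seconds) for the install
 * completion event before returning. */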
static int ath10k_install_key(struct ath10k_vif *arvif,
			      struct ieee80211_key_conf *key,
			      enum set_key_cmd cmd,
			      const u8 *macaddr)
{
	struct ath10k *ar = arvif->ar;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->install_key_done);

	ret = ath10k_send_key(arvif, key, cmd, macaddr);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
	if (ret == 0)
		return -ETIMEDOUT;

	return 0;
}

static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
					const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int ret;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
		if (arvif->wep_keys[i] == NULL)
			continue;

		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
					 addr);
		if (ret)
			return ret;

		peer->keys[i] = arvif->wep_keys[i];
	}

	return 0;
}

static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
				  const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int first_errno = 0;
	int ret;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] == NULL)
			continue;

		ret = ath10k_install_key(arvif, peer->keys[i],
					 DISABLE_KEY, addr);
		if (ret && first_errno == 0)
			first_errno = ret;

		if (ret)
			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
				    i, ret);

		peer->keys[i] = NULL;
	}

	return first_errno;
}

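/* Walk the peer list and disable every peer key that points at the given
 * mac80211 key. The list is scanned under data_lock but the actual removal
 * (which may sleep) is done with the lock dropped, hence the retry loop. */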
static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
				 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	u8 addr[ETH_ALEN];
	int first_errno = 0;
	int ret;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	for (;;) {
		/* since ath10k_install_key may sleep we can't hold data_lock
		 * all the time, so we try to remove the keys incrementally */
		spin_lock_bh(&ar->data_lock);
		i = 0;
		list_for_each_entry(peer, &ar->peers, list) {
			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] == key) {
					ether_addr_copy(addr, peer->addr);
					peer->keys[i] = NULL;
					break;
				}
			}

			if (i < ARRAY_SIZE(peer->keys))
				break;
		}
		spin_unlock_bh(&ar->data_lock);

		if (i == ARRAY_SIZE(peer->keys))
			break;

		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
		if (ret && first_errno == 0)
			first_errno = ret;

		if (ret)
			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
				    addr, ret);
	}

	return first_errno;
}

/*********************/
/* General utilities */
/*********************/

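/* Map a cfg80211 channel definition (band + width) onto the firmware's WMI
 * phy mode. Unsupported combinations yield MODE_UNKNOWN and trip the
 * WARN_ON at the end of the function. */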
static inline enum wmi_phy_mode
chan_to_phymode(const struct cfg80211_chan_def *chandef)
{
	enum wmi_phy_mode phymode = MODE_UNKNOWN;

	switch (chandef->chan->band) {
	case IEEE80211_BAND_2GHZ:
		switch (chandef->width) {
		case NL80211_CHAN_WIDTH_20_NOHT:
			phymode = MODE_11G;
			break;
		case NL80211_CHAN_WIDTH_20:
			phymode = MODE_11NG_HT20;
			break;
		case NL80211_CHAN_WIDTH_40:
			phymode = MODE_11NG_HT40;
			break;
		case NL80211_CHAN_WIDTH_5:
		case NL80211_CHAN_WIDTH_10:
		case NL80211_CHAN_WIDTH_80:
		case NL80211_CHAN_WIDTH_80P80:
		case NL80211_CHAN_WIDTH_160:
			phymode = MODE_UNKNOWN;
			break;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		switch (chandef->width) {
		case NL80211_CHAN_WIDTH_20_NOHT:
			phymode = MODE_11A;
			break;
		case NL80211_CHAN_WIDTH_20:
			phymode = MODE_11NA_HT20;
			break;
		case NL80211_CHAN_WIDTH_40:
			phymode = MODE_11NA_HT40;
			break;
		case NL80211_CHAN_WIDTH_80:
			phymode = MODE_11AC_VHT80;
			break;
		case NL80211_CHAN_WIDTH_5:
		case NL80211_CHAN_WIDTH_10:
		case NL80211_CHAN_WIDTH_80P80:
		case NL80211_CHAN_WIDTH_160:
			phymode = MODE_UNKNOWN;
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(phymode == MODE_UNKNOWN);
	return phymode;
}

static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
{
/*
 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
 *   0 for no restriction
 *   1 for 1/4 us
 *   2 for 1/2 us
 *   3 for 1 us
 *   4 for 2 us
 *   5 for 4 us
 *   6 for 8 us
 *   7 for 16 us
 */
	switch (mpdudensity) {
	case 0:
		return 0;
	case 1:
	case 2:
	case 3:
	/* Our lower layer calculations limit our precision to
	   1 microsecond */
		return 1;
	case 4:
		return 2;
	case 5:
		return 4;
	case 6:
		return 8;
	case 7:
		return 16;
	default:
		return 0;
	}
}

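/* Create a firmware peer entry for the given MAC address on a vdev and wait
 * for the firmware to confirm its creation before bumping num_peers. */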
static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
	if (ret) {
		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
			    addr, vdev_id, ret);
		return ret;
	}

	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
	if (ret) {
		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
			    addr, vdev_id, ret);
		return ret;
	}
	spin_lock_bh(&ar->data_lock);
	ar->num_peers++;
	spin_unlock_bh(&ar->data_lock);

	return 0;
}

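/* Program the station kickout threshold and the AP keepalive idle and
 * unresponsive timeouts for an AP vdev. */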
static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 param;
	int ret;

	param = ar->wmi.pdev_param->sta_kickout_th;
	ret = ath10k_wmi_pdev_set_param(ar, param,
					ATH10K_KICKOUT_THRESHOLD);
	if (ret) {
		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MIN_IDLE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MAX_IDLE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;

	if (value != 0xFFFFFFFF)
		value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
			      ATH10K_RTS_MAX);

	vdev_param = ar->wmi.vdev_param->rts_threshold;
	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}

static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;

	if (value != 0xFFFFFFFF)
		value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
				ATH10K_FRAGMT_THRESHOLD_MIN,
				ATH10K_FRAGMT_THRESHOLD_MAX);

	vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}

static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
	if (ret)
		return ret;

	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret)
		return ret;

	spin_lock_bh(&ar->data_lock);
	ar->num_peers--;
	spin_unlock_bh(&ar->data_lock);

	return 0;
}

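/* Remove any peer entries still associated with a vdev from the driver's
 * peer list, warning about each stale entry that is dropped. */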
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_peer *peer, *tmp;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_peer_cleanup_all(struct ath10k *ar)
{
	struct ath10k_peer *peer, *tmp;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
		list_del(&peer->list);
		kfree(peer);
	}
	ar->num_peers = 0;
	spin_unlock_bh(&ar->data_lock);
}

/************************/
/* Interface management */
/************************/

void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->data_lock);

	if (!arvif->beacon)
		return;

	if (!arvif->beacon_buf)
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
				 arvif->beacon->len, DMA_TO_DEVICE);

	dev_kfree_skb_any(arvif->beacon);

	arvif->beacon = NULL;
	arvif->beacon_sent = false;
}

static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->data_lock);

	ath10k_mac_vif_beacon_free(arvif);

	if (arvif->beacon_buf) {
		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
				  arvif->beacon_buf, arvif->beacon_paddr);
		arvif->beacon_buf = NULL;
	}
}

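/* Wait for the firmware to answer the most recent vdev start/restart/stop
 * request; the vdev_setup_done completion is signalled when the
 * corresponding WMI event arrives. */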
static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = wait_for_completion_timeout(&ar->vdev_setup_done,
					  ATH10K_VDEV_SETUP_TIMEOUT_HZ);
	if (ret == 0)
		return -ETIMEDOUT;

	return 0;
}

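/* Start the monitor vdev on the currently configured channel and bring it
 * up with the device MAC address. On failure the vdev is stopped again. */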
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
	struct cfg80211_chan_def *chandef = &ar->chandef;
	struct ieee80211_channel *channel = chandef->chan;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	arg.vdev_id = vdev_id;
	arg.channel.freq = channel->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;

	/* TODO setup this dynamically, what if we
	   don't have any vifs? */
	arg.channel.mode = chan_to_phymode(chandef);
	arg.channel.chan_radar =
			!!(channel->flags & IEEE80211_CHAN_RADAR);

	arg.channel.min_power = 0;
	arg.channel.max_power = channel->max_power * 2;
	arg.channel.max_reg_power = channel->max_reg_power * 2;
	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		goto vdev_stop;
	}

	ar->monitor_vdev_id = vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
		   ar->monitor_vdev_id);
	return 0;

vdev_stop:
	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
			    ar->monitor_vdev_id, ret);

	return ret;
}

static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath10k_vdev_setup_sync(ar);
	if (ret)
		ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
	int bit, ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->free_vdev_map == 0) {
		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
		return -ENOMEM;
	}

	bit = __ffs64(ar->free_vdev_map);

	ar->monitor_vdev_id = bit;

	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
				     WMI_VDEV_TYPE_MONITOR,
				     0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
		   ar->monitor_vdev_id);

	return 0;
}

static int ath10k_monitor_vdev_delete(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_start(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_monitor_vdev_create(ar);
	if (ret) {
		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
		return ret;
	}

	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
		ath10k_monitor_vdev_delete(ar);
		return ret;
	}

	ar->monitor_started = true;
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");

	return 0;
}

static int ath10k_monitor_stop(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_monitor_vdev_stop(ar);
	if (ret) {
		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
		return ret;
	}

	ret = ath10k_monitor_vdev_delete(ar);
	if (ret) {
		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
		return ret;
	}

	ar->monitor_started = false;
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");

	return 0;
}

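/* Decide whether the monitor vdev should be running (explicit monitor mode,
 * promiscuous filtering or an ongoing CAC) and start or stop it so the
 * hardware state matches. */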
static int ath10k_monitor_recalc(struct ath10k *ar)
{
	bool should_start;

	lockdep_assert_held(&ar->conf_mutex);

	should_start = ar->monitor ||
		       ar->filter_flags & FIF_PROMISC_IN_BSS ||
		       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac monitor recalc started? %d should? %d\n",
		   ar->monitor_started, should_start);

	if (should_start == ar->monitor_started)
		return 0;

	if (should_start)
		return ath10k_monitor_start(ar);

	return ath10k_monitor_stop(ar);
}

static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param, rts_cts = 0;

	lockdep_assert_held(&ar->conf_mutex);

	vdev_param = ar->wmi.vdev_param->enable_rtscts;

	if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
		rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);

	if (arvif->num_legacy_stations > 0)
		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
			      WMI_RTSCTS_PROFILE);

	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
					 rts_cts);
}

static int ath10k_start_cac(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);

	ret = ath10k_monitor_recalc(ar);
	if (ret) {
		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
		   ar->monitor_vdev_id);

	return 0;
}

static int ath10k_stop_cac(struct ath10k *ar)
{
	lockdep_assert_held(&ar->conf_mutex);

	/* CAC is not running - do nothing */
	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
		return 0;

	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
	ath10k_monitor_stop(ar);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");

	return 0;
}

static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ath10k_stop_cac(ar);

	if (!ar->radar_enabled)
		return;

	if (ar->num_started_vdevs > 0)
		return;

	ret = ath10k_start_cac(ar);
	if (ret) {
		/*
		 * Not possible to start CAC on current channel so starting
		 * radiation is not allowed, make this channel DFS_UNAVAILABLE
		 * by indicating that radar was detected.
		 */
		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
		ieee80211_radar_detected(ar->hw);
	}
}

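/* Issue a WMI vdev start (or restart) request on the current channel with
 * the vif's SSID/DTIM settings and wait for the firmware response before
 * updating the started-vdev count and radar state. */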
static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
{
	struct ath10k *ar = arvif->ar;
	struct cfg80211_chan_def *chandef = &ar->chandef;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = arvif->dtim_period;
	arg.bcn_intval = arvif->beacon_interval;

	arg.channel.freq = chandef->chan->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.mode = chan_to_phymode(chandef);

	arg.channel.min_power = 0;
	arg.channel.max_power = chandef->chan->max_power * 2;
	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;

	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;

		/* For now allow DFS for AP mode */
		arg.channel.chan_radar =
			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
		arg.ssid = arvif->vif->bss_conf.ssid;
		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac vdev %d start center_freq %d phymode %s\n",
		   arg.vdev_id, arg.channel.freq,
		   ath10k_wmi_phymode_str(arg.channel.mode));

	if (restart)
		ret = ath10k_wmi_vdev_restart(ar, &arg);
	else
		ret = ath10k_wmi_vdev_start(ar, &arg);

	if (ret) {
		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
			    arg.vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n",
			    arg.vdev_id, ret);
		return ret;
	}

	ar->num_started_vdevs++;
	ath10k_recalc_radar_detection(ar);

	return ret;
}

static int ath10k_vdev_start(struct ath10k_vif *arvif)
{
	return ath10k_vdev_start_restart(arvif, false);
}

static int ath10k_vdev_restart(struct ath10k_vif *arvif)
{
	return ath10k_vdev_start_restart(arvif, true);
}

static int ath10k_vdev_stop(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	WARN_ON(ar->num_started_vdevs == 0);

	if (ar->num_started_vdevs != 0) {
		ar->num_started_vdevs--;
		ath10k_recalc_radar_detection(ar);
	}

	return ret;
}

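/* Bring an AP/IBSS vdev up or down in response to a beacon-enable change
 * from mac80211, freeing any queued beacon when beaconing stops. */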
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
				     struct ieee80211_bss_conf *info)
{
	struct ath10k *ar = arvif->ar;
	int ret = 0;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (!info->enable_beacon) {
		ath10k_vdev_stop(arvif);

		arvif->is_started = false;
		arvif->is_up = false;

		spin_lock_bh(&arvif->ar->data_lock);
		ath10k_mac_vif_beacon_free(arvif);
		spin_unlock_bh(&arvif->ar->data_lock);

		return;
	}

	arvif->tx_seq_no = 0x1000;

	ret = ath10k_vdev_start(arvif);
	if (ret)
		return;

	arvif->aid = 0;
	ether_addr_copy(arvif->bssid, info->bssid);

	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
				 arvif->bssid);
	if (ret) {
		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
			    arvif->vdev_id, ret);
		ath10k_vdev_stop(arvif);
		return;
	}

	arvif->is_started = true;
	arvif->is_up = true;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}

static void ath10k_control_ibss(struct ath10k_vif *arvif,
				struct ieee80211_bss_conf *info,
				const u8 self_peer[ETH_ALEN])
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;
	int ret = 0;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (!info->ibss_joined) {
		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
		if (ret)
			ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
				    self_peer, arvif->vdev_id, ret);

		if (is_zero_ether_addr(arvif->bssid))
			return;

		memset(arvif->bssid, 0, ETH_ALEN);

		return;
	}

	ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
	if (ret) {
		ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
			    self_peer, arvif->vdev_id, ret);
		return;
	}

	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
					ATH10K_DEFAULT_ATIM);
	if (ret)
		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
			    arvif->vdev_id, ret);
}

/*
 * Review this when mac80211 gains per-interface powersave support.
 */
static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_conf *conf = &ar->hw->conf;
	enum wmi_sta_powersave_param param;
	enum wmi_sta_ps_mode psmode;
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->vif->type != NL80211_IFTYPE_STATION)
		return 0;

	if (conf->flags & IEEE80211_CONF_PS) {
		psmode = WMI_STA_PS_MODE_ENABLED;
		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;

		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
						  conf->dynamic_ps_timeout);
		if (ret) {
			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	} else {
		psmode = WMI_STA_PS_MODE_DISABLED;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
		   arvif->vdev_id, psmode ? "enable" : "disable");

	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
	if (ret) {
		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
			    psmode, arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

/**********************/
/* Station management */
/**********************/

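/* The ath10k_peer_assoc_h_* helpers below each fill one part of the WMI
 * peer assoc argument (basic ids, crypto, legacy/HT/VHT rates, QoS and phy
 * mode) from mac80211 station and BSS state. */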
static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
				      struct ath10k_vif *arvif,
				      struct ieee80211_sta *sta,
				      struct ieee80211_bss_conf *bss_conf,
				      struct wmi_peer_assoc_complete_arg *arg)
{
	lockdep_assert_held(&ar->conf_mutex);

	ether_addr_copy(arg->addr, sta->addr);
	arg->vdev_id = arvif->vdev_id;
	arg->peer_aid = sta->aid;
	arg->peer_flags |= WMI_PEER_AUTH;

	if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
		/*
		 * It seems the FW has problems with Power Save in STA
		 * mode when this parameter is set too high (e.g. 5).
		 * Often the FW doesn't send a NULL (with clean P flags)
		 * frame even though there is info about buffered frames
		 * in beacons. Sometimes we have to wait more than 10
		 * seconds before the FW wakes up. Often a single ping
		 * from the AP to our device simply fails (more than 50%
		 * of the time).
		 *
		 * Setting this FW parameter to 1 causes the FW to check
		 * every beacon and wake up immediately after detecting
		 * buffered data.
		 */
		arg->peer_listen_intval = 1;
	else
		arg->peer_listen_intval = ar->hw->conf.listen_interval;

	arg->peer_num_spatial_streams = 1;

	/*
	 * The assoc capabilities are available only in managed mode.
	 */
	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
		arg->peer_caps = bss_conf->assoc_capability;
}

static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
				       struct ath10k_vif *arvif,
				       struct wmi_peer_assoc_complete_arg *arg)
{
	struct ieee80211_vif *vif = arvif->vif;
	struct ieee80211_bss_conf *info = &vif->bss_conf;
	struct cfg80211_bss *bss;
	const u8 *rsnie = NULL;
	const u8 *wpaie = NULL;

	lockdep_assert_held(&ar->conf_mutex);

	bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
			       info->bssid, NULL, 0, 0, 0);
	if (bss) {
		const struct cfg80211_bss_ies *ies;

		rcu_read_lock();
		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);

		ies = rcu_dereference(bss->ies);

		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
						WLAN_OUI_TYPE_MICROSOFT_WPA,
						ies->data,
						ies->len);
		rcu_read_unlock();
		cfg80211_put_bss(ar->hw->wiphy, bss);
	}

	/* FIXME: is basing this on the RSN IE/WPA IE a correct idea? */
	if (rsnie || wpaie) {
		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
		arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
	}

	if (wpaie) {
		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
		arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
	}
}

static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
				      struct ieee80211_sta *sta,
				      struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
	const struct ieee80211_supported_band *sband;
	const struct ieee80211_rate *rates;
	u32 ratemask;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
	ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
	rates = sband->bitrates;

	rateset->num_rates = 0;

	for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
		if (!(ratemask & 1))
			continue;

		rateset->rates[rateset->num_rates] = rates->hw_value;
		rateset->num_rates++;
	}
}

static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
				   struct ieee80211_sta *sta,
				   struct wmi_peer_assoc_complete_arg *arg)
{
	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	int i, n;
	u32 stbc;

	lockdep_assert_held(&ar->conf_mutex);

	if (!ht_cap->ht_supported)
		return;

	arg->peer_flags |= WMI_PEER_HT;
	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				    ht_cap->ampdu_factor)) - 1;

	arg->peer_mpdu_density =
		ath10k_parse_mpdudensity(ht_cap->ampdu_density);

	arg->peer_ht_caps = ht_cap->cap;
	arg->peer_rate_caps |= WMI_RC_HT_FLAG;

	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
		arg->peer_flags |= WMI_PEER_LDPC;

	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
		arg->peer_flags |= WMI_PEER_40MHZ;
		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
	}

	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
		arg->peer_rate_caps |= WMI_RC_SGI_FLAG;

	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
		arg->peer_rate_caps |= WMI_RC_SGI_FLAG;

	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
		arg->peer_flags |= WMI_PEER_STBC;
	}

	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
		arg->peer_rate_caps |= stbc;
		arg->peer_flags |= WMI_PEER_STBC;
	}

	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
		arg->peer_rate_caps |= WMI_RC_TS_FLAG;
	else if (ht_cap->mcs.rx_mask[1])
		arg->peer_rate_caps |= WMI_RC_DS_FLAG;

	for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
		if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
			arg->peer_ht_rates.rates[n++] = i;

	/*
	 * This is a workaround for HT-enabled STAs which break the spec
	 * and have no HT capabilities RX mask (no HT RX MCS map).
	 *
	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
	 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
	 *
	 * Firmware asserts if such situation occurs.
	 */
	if (n == 0) {
		arg->peer_ht_rates.num_rates = 8;
		for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
			arg->peer_ht_rates.rates[i] = i;
	} else {
		arg->peer_ht_rates.num_rates = n;
		arg->peer_num_spatial_streams = sta->rx_nss;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
		   arg->addr,
		   arg->peer_ht_rates.num_rates,
		   arg->peer_num_spatial_streams);
}

static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
				    struct ath10k_vif *arvif,
				    struct ieee80211_sta *sta)
{
	u32 uapsd = 0;
	u32 max_sp = 0;
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (sta->wme && sta->uapsd_queues) {
		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
			   sta->uapsd_queues, sta->max_sp);

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
				 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
				 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
				 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
				 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;

		if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
			max_sp = sta->max_sp;

		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
						 sta->addr,
						 WMI_AP_PS_PEER_PARAM_UAPSD,
						 uapsd);
		if (ret) {
			ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}

		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
						 sta->addr,
						 WMI_AP_PS_PEER_PARAM_MAX_SP,
						 max_sp);
		if (ret) {
			ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}

		/* TODO setup this based on STA listen interval and
		   beacon interval. Currently we don't know
		   sta->listen_interval - mac80211 patch required.
		   Currently use 10 seconds */
		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
						 10);
		if (ret) {
			ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
				    struct ieee80211_sta *sta,
				    struct wmi_peer_assoc_complete_arg *arg)
{
	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
	u8 ampdu_factor;

	if (!vht_cap->vht_supported)
		return;

	arg->peer_flags |= WMI_PEER_VHT;
	arg->peer_vht_caps = vht_cap->cap;

	ampdu_factor = (vht_cap->cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

	/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
	 * zero in VHT IE. Using it would result in degraded throughput.
	 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
	 * it if VHT max_mpdu is smaller. */
	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
					ampdu_factor)) - 1);

	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
		arg->peer_flags |= WMI_PEER_80MHZ;

	arg->peer_vht_rates.rx_max_rate =
		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
	arg->peer_vht_rates.rx_mcs_set =
		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
	arg->peer_vht_rates.tx_max_rate =
		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
	arg->peer_vht_rates.tx_mcs_set =
		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
}

static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
				    struct ath10k_vif *arvif,
				    struct ieee80211_sta *sta,
				    struct ieee80211_bss_conf *bss_conf,
				    struct wmi_peer_assoc_complete_arg *arg)
{
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_AP:
		if (sta->wme)
			arg->peer_flags |= WMI_PEER_QOS;

		if (sta->wme && sta->uapsd_queues) {
			arg->peer_flags |= WMI_PEER_APSD;
			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
		}
		break;
	case WMI_VDEV_TYPE_STA:
		if (bss_conf->qos)
			arg->peer_flags |= WMI_PEER_QOS;
		break;
	default:
		break;
	}
}

static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
					struct ath10k_vif *arvif,
					struct ieee80211_sta *sta,
					struct wmi_peer_assoc_complete_arg *arg)
{
	enum wmi_phy_mode phymode = MODE_UNKNOWN;

	switch (ar->hw->conf.chandef.chan->band) {
	case IEEE80211_BAND_2GHZ:
		if (sta->ht_cap.ht_supported) {
			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
				phymode = MODE_11NG_HT40;
			else
				phymode = MODE_11NG_HT20;
		} else {
			phymode = MODE_11G;
		}

		break;
	case IEEE80211_BAND_5GHZ:
		/*
		 * Check VHT first.
		 */
		if (sta->vht_cap.vht_supported) {
			if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
				phymode = MODE_11AC_VHT80;
			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
				phymode = MODE_11AC_VHT40;
			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
				phymode = MODE_11AC_VHT20;
		} else if (sta->ht_cap.ht_supported) {
			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
				phymode = MODE_11NA_HT40;
			else
				phymode = MODE_11NA_HT20;
		} else {
			phymode = MODE_11A;
		}

		break;
	default:
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
		   sta->addr, ath10k_wmi_phymode_str(phymode));

	arg->peer_phymode = phymode;
	WARN_ON(phymode == MODE_UNKNOWN);
}

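/* Assemble the complete WMI peer assoc argument by running all of the
 * helpers above against the given station. */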
static int ath10k_peer_assoc_prepare(struct ath10k *ar,
				     struct ath10k_vif *arvif,
				     struct ieee80211_sta *sta,
				     struct ieee80211_bss_conf *bss_conf,
				     struct wmi_peer_assoc_complete_arg *arg)
{
	lockdep_assert_held(&ar->conf_mutex);

	memset(arg, 0, sizeof(*arg));

	ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
	ath10k_peer_assoc_h_crypto(ar, arvif, arg);
	ath10k_peer_assoc_h_rates(ar, sta, arg);
	ath10k_peer_assoc_h_ht(ar, sta, arg);
	ath10k_peer_assoc_h_vht(ar, sta, arg);
	ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
	ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);

	return 0;
}

static const u32 ath10k_smps_map[] = {
	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};

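/* Translate the peer's HT SM power save field into the firmware's SMPS
 * state using ath10k_smps_map and push it via a WMI peer param. */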
1467static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
1468 const u8 *addr,
1469 const struct ieee80211_sta_ht_cap *ht_cap)
1470{
1471 int smps;
1472
1473 if (!ht_cap->ht_supported)
1474 return 0;
1475
1476 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
1477 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
1478
1479 if (smps >= ARRAY_SIZE(ath10k_smps_map))
1480 return -EINVAL;
1481
1482 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
1483 WMI_PEER_SMPS_STATE,
1484 ath10k_smps_map[smps]);
1485}
1486
Kalle Valo5e3dd152013-06-12 20:52:10 +03001487/* can be called only in mac80211 callbacks due to `key_count` usage */
1488static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1489 struct ieee80211_vif *vif,
1490 struct ieee80211_bss_conf *bss_conf)
1491{
1492 struct ath10k *ar = hw->priv;
1493 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior90046f52014-02-14 14:45:51 +01001494 struct ieee80211_sta_ht_cap ht_cap;
Kalle Valob9ada652013-10-16 15:44:46 +03001495 struct wmi_peer_assoc_complete_arg peer_arg;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001496 struct ieee80211_sta *ap_sta;
1497 int ret;
1498
Michal Kazior548db542013-07-05 16:15:15 +03001499 lockdep_assert_held(&ar->conf_mutex);
1500
Kalle Valo5e3dd152013-06-12 20:52:10 +03001501 rcu_read_lock();
1502
1503 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
1504 if (!ap_sta) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001505 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02001506 bss_conf->bssid, arvif->vdev_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001507 rcu_read_unlock();
1508 return;
1509 }
1510
Michal Kazior90046f52014-02-14 14:45:51 +01001511	/* ap_sta must be accessed only within the rcu section, which must be
 1512	 * left before calling ath10k_setup_peer_smps() as it might sleep. */
1513 ht_cap = ap_sta->ht_cap;
1514
Kalle Valob9ada652013-10-16 15:44:46 +03001515 ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
1516 bss_conf, &peer_arg);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001517 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001518 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02001519 bss_conf->bssid, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001520 rcu_read_unlock();
1521 return;
1522 }
1523
1524 rcu_read_unlock();
1525
Kalle Valob9ada652013-10-16 15:44:46 +03001526 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1527 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001528 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02001529 bss_conf->bssid, arvif->vdev_id, ret);
Kalle Valob9ada652013-10-16 15:44:46 +03001530 return;
1531 }
1532
Michal Kazior90046f52014-02-14 14:45:51 +01001533 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
1534 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001535 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02001536 arvif->vdev_id, ret);
Michal Kazior90046f52014-02-14 14:45:51 +01001537 return;
1538 }
1539
Michal Kazior7aa7a722014-08-25 12:09:38 +02001540 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03001541 "mac vdev %d up (associated) bssid %pM aid %d\n",
1542 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
1543
Michal Kaziorc930f742014-01-23 11:38:25 +01001544 arvif->aid = bss_conf->aid;
Kalle Valob25f32c2014-09-14 12:50:49 +03001545 ether_addr_copy(arvif->bssid, bss_conf->bssid);
Michal Kaziorc930f742014-01-23 11:38:25 +01001546
1547 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
1548 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001549 ath10k_warn(ar, "failed to set vdev %d up: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001550 arvif->vdev_id, ret);
Michal Kaziorc930f742014-01-23 11:38:25 +01001551 return;
1552 }
1553
1554 arvif->is_up = true;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001555}
1556
1557/*
1558 * FIXME: flush TIDs
1559 */
1560static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
1561 struct ieee80211_vif *vif)
1562{
1563 struct ath10k *ar = hw->priv;
1564 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1565 int ret;
1566
Michal Kazior548db542013-07-05 16:15:15 +03001567 lockdep_assert_held(&ar->conf_mutex);
1568
Kalle Valo5e3dd152013-06-12 20:52:10 +03001569 /*
1570 * For some reason, calling VDEV-DOWN before VDEV-STOP
 1571	 * makes the FW send frames via HTT after disassociation.
1572 * No idea why this happens, even though VDEV-DOWN is supposed
1573 * to be analogous to link down, so just stop the VDEV.
1574 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02001575	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated)\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03001576 arvif->vdev_id);
1577
1578 /* FIXME: check return value */
Kalle Valo5e3dd152013-06-12 20:52:10 +03001579 ret = ath10k_vdev_stop(arvif);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001580
1581 /*
1582 * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
1583 * report beacons from previously associated network through HTT.
1584 * This in turn would spam mac80211 WARN_ON if we bring down all
1585 * interfaces as it expects there is no rx when no interface is
1586 * running.
1587 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02001588 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
Kalle Valo60c3daa2013-09-08 17:56:07 +03001589
1590 /* FIXME: why don't we print error if wmi call fails? */
Kalle Valo5e3dd152013-06-12 20:52:10 +03001591 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001592
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001593 arvif->def_wep_key_idx = 0;
Michal Kaziorc930f742014-01-23 11:38:25 +01001594
1595 arvif->is_started = false;
1596 arvif->is_up = false;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001597}
1598
1599static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02001600 struct ieee80211_sta *sta, bool reassoc)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001601{
Kalle Valob9ada652013-10-16 15:44:46 +03001602 struct wmi_peer_assoc_complete_arg peer_arg;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001603 int ret = 0;
1604
Michal Kazior548db542013-07-05 16:15:15 +03001605 lockdep_assert_held(&ar->conf_mutex);
1606
Kalle Valob9ada652013-10-16 15:44:46 +03001607 ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001608 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001609 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02001610 sta->addr, arvif->vdev_id, ret);
Kalle Valob9ada652013-10-16 15:44:46 +03001611 return ret;
1612 }
1613
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02001614 peer_arg.peer_reassoc = reassoc;
Kalle Valob9ada652013-10-16 15:44:46 +03001615 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1616 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001617 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02001618 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001619 return ret;
1620 }
1621
Michal Kazior90046f52014-02-14 14:45:51 +01001622 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
1623 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001624 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02001625 arvif->vdev_id, ret);
Michal Kazior90046f52014-02-14 14:45:51 +01001626 return ret;
1627 }
1628
Michal Kaziora4841eb2014-08-28 09:59:39 +02001629 if (!sta->wme && !reassoc) {
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02001630 arvif->num_legacy_stations++;
1631 ret = ath10k_recalc_rtscts_prot(arvif);
1632 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001633 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02001634 arvif->vdev_id, ret);
1635 return ret;
1636 }
1637 }
1638
Kalle Valo5e3dd152013-06-12 20:52:10 +03001639 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
1640 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001641 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02001642 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001643 return ret;
1644 }
1645
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01001646 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
1647 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001648 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02001649 sta->addr, arvif->vdev_id, ret);
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01001650 return ret;
1651 }
1652
Kalle Valo5e3dd152013-06-12 20:52:10 +03001653 return ret;
1654}
1655
1656static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
1657 struct ieee80211_sta *sta)
1658{
1659 int ret = 0;
1660
Michal Kazior548db542013-07-05 16:15:15 +03001661 lockdep_assert_held(&ar->conf_mutex);
1662
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02001663 if (!sta->wme) {
1664 arvif->num_legacy_stations--;
1665 ret = ath10k_recalc_rtscts_prot(arvif);
1666 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001667 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02001668 arvif->vdev_id, ret);
1669 return ret;
1670 }
1671 }
1672
Kalle Valo5e3dd152013-06-12 20:52:10 +03001673 ret = ath10k_clear_peer_keys(arvif, sta->addr);
1674 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001675 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02001676 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001677 return ret;
1678 }
1679
1680 return ret;
1681}
1682
1683/**************/
1684/* Regulatory */
1685/**************/
1686
1687static int ath10k_update_channel_list(struct ath10k *ar)
1688{
1689 struct ieee80211_hw *hw = ar->hw;
1690 struct ieee80211_supported_band **bands;
1691 enum ieee80211_band band;
1692 struct ieee80211_channel *channel;
1693 struct wmi_scan_chan_list_arg arg = {0};
1694 struct wmi_channel_arg *ch;
1695 bool passive;
1696 int len;
1697 int ret;
1698 int i;
1699
Michal Kazior548db542013-07-05 16:15:15 +03001700 lockdep_assert_held(&ar->conf_mutex);
1701
Kalle Valo5e3dd152013-06-12 20:52:10 +03001702 bands = hw->wiphy->bands;
1703 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1704 if (!bands[band])
1705 continue;
1706
1707 for (i = 0; i < bands[band]->n_channels; i++) {
1708 if (bands[band]->channels[i].flags &
1709 IEEE80211_CHAN_DISABLED)
1710 continue;
1711
1712 arg.n_channels++;
1713 }
1714 }
1715
1716 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
1717 arg.channels = kzalloc(len, GFP_KERNEL);
1718 if (!arg.channels)
1719 return -ENOMEM;
1720
1721 ch = arg.channels;
1722 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1723 if (!bands[band])
1724 continue;
1725
1726 for (i = 0; i < bands[band]->n_channels; i++) {
1727 channel = &bands[band]->channels[i];
1728
1729 if (channel->flags & IEEE80211_CHAN_DISABLED)
1730 continue;
1731
1732 ch->allow_ht = true;
1733
1734 /* FIXME: when should we really allow VHT? */
1735 ch->allow_vht = true;
1736
1737 ch->allow_ibss =
Luis R. Rodriguez8fe02e12013-10-21 19:22:25 +02001738 !(channel->flags & IEEE80211_CHAN_NO_IR);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001739
1740 ch->ht40plus =
1741 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
1742
Marek Puzyniake8a50f82013-11-20 09:59:47 +02001743 ch->chan_radar =
1744 !!(channel->flags & IEEE80211_CHAN_RADAR);
1745
Luis R. Rodriguez8fe02e12013-10-21 19:22:25 +02001746 passive = channel->flags & IEEE80211_CHAN_NO_IR;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001747 ch->passive = passive;
1748
1749 ch->freq = channel->center_freq;
Michal Kazior2d667212014-09-18 15:21:21 +02001750 ch->band_center_freq1 = channel->center_freq;
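			/* Power values below are doubled; the WMI channel
			 * argument appears to use 0.5 dBm (and 0.5 dB for
			 * antenna gain) units, so e.g. a 20 dBm regulatory
			 * limit is passed as 40. (Unit assumption inferred
			 * from the conversion, not from firmware docs.)
			 */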
Michal Kazior89c5c842013-10-23 04:02:13 -07001751 ch->min_power = 0;
Michal Kazior02256932013-10-23 04:02:14 -07001752 ch->max_power = channel->max_power * 2;
1753 ch->max_reg_power = channel->max_reg_power * 2;
1754 ch->max_antenna_gain = channel->max_antenna_gain * 2;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001755 ch->reg_class_id = 0; /* FIXME */
1756
1757 /* FIXME: why use only legacy modes, why not any
1758 * HT/VHT modes? Would that even make any
1759 * difference? */
1760 if (channel->band == IEEE80211_BAND_2GHZ)
1761 ch->mode = MODE_11G;
1762 else
1763 ch->mode = MODE_11A;
1764
1765 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
1766 continue;
1767
Michal Kazior7aa7a722014-08-25 12:09:38 +02001768 ath10k_dbg(ar, ATH10K_DBG_WMI,
Kalle Valo60c3daa2013-09-08 17:56:07 +03001769 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
1770 ch - arg.channels, arg.n_channels,
Kalle Valo5e3dd152013-06-12 20:52:10 +03001771 ch->freq, ch->max_power, ch->max_reg_power,
1772 ch->max_antenna_gain, ch->mode);
1773
1774 ch++;
1775 }
1776 }
1777
1778 ret = ath10k_wmi_scan_chan_list(ar, &arg);
1779 kfree(arg.channels);
1780
1781 return ret;
1782}
1783
Marek Puzyniak821af6a2014-03-21 17:46:57 +02001784static enum wmi_dfs_region
1785ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
1786{
1787 switch (dfs_region) {
1788 case NL80211_DFS_UNSET:
1789 return WMI_UNINIT_DFS_DOMAIN;
1790 case NL80211_DFS_FCC:
1791 return WMI_FCC_DFS_DOMAIN;
1792 case NL80211_DFS_ETSI:
1793 return WMI_ETSI_DFS_DOMAIN;
1794 case NL80211_DFS_JP:
1795 return WMI_MKK4_DFS_DOMAIN;
1796 }
1797 return WMI_UNINIT_DFS_DOMAIN;
1798}
1799
Michal Kaziorf7843d72013-07-16 09:38:52 +02001800static void ath10k_regd_update(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001801{
Kalle Valo5e3dd152013-06-12 20:52:10 +03001802 struct reg_dmn_pair_mapping *regpair;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001803 int ret;
Marek Puzyniak821af6a2014-03-21 17:46:57 +02001804 enum wmi_dfs_region wmi_dfs_reg;
1805 enum nl80211_dfs_regions nl_dfs_reg;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001806
Michal Kaziorf7843d72013-07-16 09:38:52 +02001807 lockdep_assert_held(&ar->conf_mutex);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001808
1809 ret = ath10k_update_channel_list(ar);
1810 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02001811 ath10k_warn(ar, "failed to update channel list: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001812
1813 regpair = ar->ath_common.regulatory.regpair;
Michal Kaziorf7843d72013-07-16 09:38:52 +02001814
Marek Puzyniak821af6a2014-03-21 17:46:57 +02001815 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
1816 nl_dfs_reg = ar->dfs_detector->region;
1817 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
1818 } else {
1819 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
1820 }
1821
Kalle Valo5e3dd152013-06-12 20:52:10 +03001822 /* Target allows setting up per-band regdomain but ath_common provides
1823 * a combined one only */
1824 ret = ath10k_wmi_pdev_set_regdomain(ar,
Kalle Valoef8c0012014-02-13 18:13:12 +02001825 regpair->reg_domain,
1826 regpair->reg_domain, /* 2ghz */
1827 regpair->reg_domain, /* 5ghz */
Kalle Valo5e3dd152013-06-12 20:52:10 +03001828 regpair->reg_2ghz_ctl,
Marek Puzyniak821af6a2014-03-21 17:46:57 +02001829 regpair->reg_5ghz_ctl,
1830 wmi_dfs_reg);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001831 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02001832 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
Michal Kaziorf7843d72013-07-16 09:38:52 +02001833}
Michal Kazior548db542013-07-05 16:15:15 +03001834
Michal Kaziorf7843d72013-07-16 09:38:52 +02001835static void ath10k_reg_notifier(struct wiphy *wiphy,
1836 struct regulatory_request *request)
1837{
1838 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1839 struct ath10k *ar = hw->priv;
Janusz Dziedzic9702c682013-11-20 09:59:41 +02001840 bool result;
Michal Kaziorf7843d72013-07-16 09:38:52 +02001841
1842 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
1843
Janusz Dziedzic9702c682013-11-20 09:59:41 +02001844 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001845 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
Janusz Dziedzic9702c682013-11-20 09:59:41 +02001846 request->dfs_region);
1847 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
1848 request->dfs_region);
1849 if (!result)
Michal Kazior7aa7a722014-08-25 12:09:38 +02001850 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
Janusz Dziedzic9702c682013-11-20 09:59:41 +02001851 request->dfs_region);
1852 }
1853
Michal Kaziorf7843d72013-07-16 09:38:52 +02001854 mutex_lock(&ar->conf_mutex);
1855 if (ar->state == ATH10K_STATE_ON)
1856 ath10k_regd_update(ar);
Michal Kazior548db542013-07-05 16:15:15 +03001857 mutex_unlock(&ar->conf_mutex);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001858}
1859
1860/***************/
1861/* TX handlers */
1862/***************/
1863
Michal Kazior42c3aa62013-10-02 11:03:38 +02001864static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
1865{
1866 if (ieee80211_is_mgmt(hdr->frame_control))
1867 return HTT_DATA_TX_EXT_TID_MGMT;
1868
1869 if (!ieee80211_is_data_qos(hdr->frame_control))
1870 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1871
1872 if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
1873 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1874
1875 return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1876}
1877
Michal Kazior2b37c292014-09-02 11:00:22 +03001878static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
Michal Kaziorddb6ad72013-10-02 11:03:39 +02001879{
Michal Kazior2b37c292014-09-02 11:00:22 +03001880 if (vif)
1881 return ath10k_vif_to_arvif(vif)->vdev_id;
Michal Kaziorddb6ad72013-10-02 11:03:39 +02001882
Michal Kazior1bbc0972014-04-08 09:45:47 +03001883 if (ar->monitor_started)
Michal Kaziorddb6ad72013-10-02 11:03:39 +02001884 return ar->monitor_vdev_id;
1885
Michal Kazior7aa7a722014-08-25 12:09:38 +02001886 ath10k_warn(ar, "failed to resolve vdev id\n");
Michal Kaziorddb6ad72013-10-02 11:03:39 +02001887 return 0;
1888}
1889
Michal Kazior4b604552014-07-21 21:03:09 +03001890/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
1891 * Control in the header.
Kalle Valo5e3dd152013-06-12 20:52:10 +03001892 */
Michal Kazior4b604552014-07-21 21:03:09 +03001893static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001894{
1895 struct ieee80211_hdr *hdr = (void *)skb->data;
Michal Kaziorc21c64d2014-07-21 21:03:10 +03001896 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001897 u8 *qos_ctl;
1898
1899 if (!ieee80211_is_data_qos(hdr->frame_control))
1900 return;
1901
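	/* Strip the 2-byte QoS Control field: move everything that precedes
	 * it forward by IEEE80211_QOS_CTL_LEN and then pull the duplicated
	 * bytes off the head of the skb.
	 */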
1902 qos_ctl = ieee80211_get_qos_ctl(hdr);
Michal Kaziorba0ccd72013-07-22 14:25:28 +02001903 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
1904 skb->data, (void *)qos_ctl - (void *)skb->data);
1905 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
Michal Kaziorc21c64d2014-07-21 21:03:10 +03001906
1907 /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
 1908	 * frames. Powersave is handled by the fw/hw so QoS NullFunc frames are
1909 * used only for CQM purposes (e.g. hostapd station keepalive ping) so
1910 * it is safe to downgrade to NullFunc.
1911 */
1912 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1913 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1914 cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1915 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03001916}
1917
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001918static void ath10k_tx_wep_key_work(struct work_struct *work)
1919{
1920 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1921 wep_key_work);
Michal Kazior7aa7a722014-08-25 12:09:38 +02001922 struct ath10k *ar = arvif->ar;
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001923 int ret, keyidx = arvif->def_wep_key_newidx;
1924
Michal Kazior911e6c02014-05-26 12:46:03 +03001925 mutex_lock(&arvif->ar->conf_mutex);
1926
1927 if (arvif->ar->state != ATH10K_STATE_ON)
1928 goto unlock;
1929
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001930 if (arvif->def_wep_key_idx == keyidx)
Michal Kazior911e6c02014-05-26 12:46:03 +03001931 goto unlock;
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001932
Michal Kazior7aa7a722014-08-25 12:09:38 +02001933 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001934 arvif->vdev_id, keyidx);
1935
1936 ret = ath10k_wmi_vdev_set_param(arvif->ar,
1937 arvif->vdev_id,
1938 arvif->ar->wmi.vdev_param->def_keyid,
1939 keyidx);
1940 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001941 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02001942 arvif->vdev_id,
1943 ret);
Michal Kazior911e6c02014-05-26 12:46:03 +03001944 goto unlock;
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001945 }
1946
1947 arvif->def_wep_key_idx = keyidx;
Michal Kazior911e6c02014-05-26 12:46:03 +03001948
1949unlock:
1950 mutex_unlock(&arvif->ar->conf_mutex);
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001951}
1952
Michal Kazior4b604552014-07-21 21:03:09 +03001953static void ath10k_tx_h_update_wep_key(struct ieee80211_vif *vif,
1954 struct ieee80211_key_conf *key,
1955 struct sk_buff *skb)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001956{
Kalle Valo5e3dd152013-06-12 20:52:10 +03001957 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1958 struct ath10k *ar = arvif->ar;
1959 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001960
Kalle Valo5e3dd152013-06-12 20:52:10 +03001961 if (!ieee80211_has_protected(hdr->frame_control))
1962 return;
1963
1964 if (!key)
1965 return;
1966
1967 if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
1968 key->cipher != WLAN_CIPHER_SUITE_WEP104)
1969 return;
1970
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001971 if (key->keyidx == arvif->def_wep_key_idx)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001972 return;
1973
Michal Kaziorcc4827b2013-10-16 15:44:45 +03001974 /* FIXME: Most likely a few frames will be TXed with an old key. Simply
1975 * queueing frames until key index is updated is not an option because
1976 * sk_buff may need more processing to be done, e.g. offchannel */
1977 arvif->def_wep_key_newidx = key->keyidx;
1978 ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001979}
1980
Michal Kazior4b604552014-07-21 21:03:09 +03001981static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
1982 struct ieee80211_vif *vif,
1983 struct sk_buff *skb)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001984{
1985 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001986 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1987
 1988	/* This is the case only for P2P_GO */
1989 if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
1990 arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
1991 return;
1992
1993 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
1994 spin_lock_bh(&ar->data_lock);
1995 if (arvif->u.ap.noa_data)
1996 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
1997 GFP_ATOMIC))
1998 memcpy(skb_put(skb, arvif->u.ap.noa_len),
1999 arvif->u.ap.noa_data,
2000 arvif->u.ap.noa_len);
2001 spin_unlock_bh(&ar->data_lock);
2002 }
2003}
2004
2005static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
2006{
2007 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002008 int ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002009
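	/* Dispatch logic: HTT >= 3.0 firmware takes every frame through the
	 * regular HTT tx command. Older firmware sends management frames
	 * over WMI when the feature flag is set (queued to a worker),
	 * otherwise via the HTT mgmt path; NullFunc frames are likewise
	 * sent via the HTT mgmt path (when WMI mgmt tx is unavailable) so
	 * tx status gets reported.
	 */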
Michal Kazior961d4c32013-08-09 10:13:34 +02002010 if (ar->htt.target_version_major >= 3) {
2011 /* Since HTT 3.0 there is no separate mgmt tx command */
2012 ret = ath10k_htt_tx(&ar->htt, skb);
2013 goto exit;
2014 }
2015
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002016 if (ieee80211_is_mgmt(hdr->frame_control)) {
2017 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
2018 ar->fw_features)) {
2019 if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
2020 ATH10K_MAX_NUM_MGMT_PENDING) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002021 ath10k_warn(ar, "reached WMI management transmit queue limit\n");
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002022 ret = -EBUSY;
2023 goto exit;
2024 }
2025
2026 skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
2027 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
2028 } else {
2029 ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
2030 }
2031 } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
2032 ar->fw_features) &&
2033 ieee80211_is_nullfunc(hdr->frame_control)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002034 /* FW does not report tx status properly for NullFunc frames
2035 * unless they are sent through mgmt tx path. mac80211 sends
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002036 * those frames when it detects link/beacon loss and depends
2037 * on the tx status to be correct. */
Michal Kazioredb82362013-07-05 16:15:14 +03002038 ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002039 } else {
Michal Kazioredb82362013-07-05 16:15:14 +03002040 ret = ath10k_htt_tx(&ar->htt, skb);
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002041 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002042
Michal Kazior961d4c32013-08-09 10:13:34 +02002043exit:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002044 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002045 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
2046 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002047 ieee80211_free_txskb(ar->hw, skb);
2048 }
2049}
2050
2051void ath10k_offchan_tx_purge(struct ath10k *ar)
2052{
2053 struct sk_buff *skb;
2054
2055 for (;;) {
2056 skb = skb_dequeue(&ar->offchan_tx_queue);
2057 if (!skb)
2058 break;
2059
2060 ieee80211_free_txskb(ar->hw, skb);
2061 }
2062}
2063
2064void ath10k_offchan_tx_work(struct work_struct *work)
2065{
2066 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
2067 struct ath10k_peer *peer;
2068 struct ieee80211_hdr *hdr;
2069 struct sk_buff *skb;
2070 const u8 *peer_addr;
2071 int vdev_id;
2072 int ret;
2073
2074 /* FW requirement: We must create a peer before FW will send out
2075 * an offchannel frame. Otherwise the frame will be stuck and
2076 * never transmitted. We delete the peer upon tx completion.
2077 * It is unlikely that a peer for offchannel tx will already be
2078 * present. However it may be in some rare cases so account for that.
2079 * Otherwise we might remove a legitimate peer and break stuff. */
2080
2081 for (;;) {
2082 skb = skb_dequeue(&ar->offchan_tx_queue);
2083 if (!skb)
2084 break;
2085
2086 mutex_lock(&ar->conf_mutex);
2087
Michal Kazior7aa7a722014-08-25 12:09:38 +02002088 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002089 skb);
2090
2091 hdr = (struct ieee80211_hdr *)skb->data;
2092 peer_addr = ieee80211_get_DA(hdr);
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002093 vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002094
2095 spin_lock_bh(&ar->data_lock);
2096 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
2097 spin_unlock_bh(&ar->data_lock);
2098
2099 if (peer)
Kalle Valo60c3daa2013-09-08 17:56:07 +03002100 /* FIXME: should this use ath10k_warn()? */
Michal Kazior7aa7a722014-08-25 12:09:38 +02002101 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002102 peer_addr, vdev_id);
2103
2104 if (!peer) {
2105 ret = ath10k_peer_create(ar, vdev_id, peer_addr);
2106 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002107 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002108 peer_addr, vdev_id, ret);
2109 }
2110
2111 spin_lock_bh(&ar->data_lock);
Wolfram Sang16735d02013-11-14 14:32:02 -08002112 reinit_completion(&ar->offchan_tx_completed);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002113 ar->offchan_tx_skb = skb;
2114 spin_unlock_bh(&ar->data_lock);
2115
2116 ath10k_tx_htt(ar, skb);
2117
2118 ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
2119 3 * HZ);
2120 if (ret <= 0)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002121 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002122 skb);
2123
2124 if (!peer) {
2125 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
2126 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002127 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002128 peer_addr, vdev_id, ret);
2129 }
2130
2131 mutex_unlock(&ar->conf_mutex);
2132 }
2133}
2134
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002135void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
2136{
2137 struct sk_buff *skb;
2138
2139 for (;;) {
2140 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
2141 if (!skb)
2142 break;
2143
2144 ieee80211_free_txskb(ar->hw, skb);
2145 }
2146}
2147
2148void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
2149{
2150 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
2151 struct sk_buff *skb;
2152 int ret;
2153
2154 for (;;) {
2155 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
2156 if (!skb)
2157 break;
2158
2159 ret = ath10k_wmi_mgmt_tx(ar, skb);
Michal Kazior5fb5e412013-10-28 07:18:13 +01002160 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002161 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02002162 ret);
Michal Kazior5fb5e412013-10-28 07:18:13 +01002163 ieee80211_free_txskb(ar->hw, skb);
2164 }
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002165 }
2166}
2167
Kalle Valo5e3dd152013-06-12 20:52:10 +03002168/************/
2169/* Scanning */
2170/************/
2171
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002172void __ath10k_scan_finish(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002173{
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002174 lockdep_assert_held(&ar->data_lock);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002175
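	/* Scan completion state machine: a finished or aborted scan first
	 * notifies mac80211 (scan or remain-on-channel) and then falls
	 * through to the common cleanup shared with the STARTING case,
	 * which resets the state to IDLE, purges offchannel tx, cancels
	 * the timeout worker and wakes any waiters.
	 */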
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002176 switch (ar->scan.state) {
2177 case ATH10K_SCAN_IDLE:
2178 break;
2179 case ATH10K_SCAN_RUNNING:
2180 case ATH10K_SCAN_ABORTING:
2181 if (ar->scan.is_roc)
2182 ieee80211_remain_on_channel_expired(ar->hw);
2183 else
2184 ieee80211_scan_completed(ar->hw,
2185 (ar->scan.state ==
2186 ATH10K_SCAN_ABORTING));
2187 /* fall through */
2188 case ATH10K_SCAN_STARTING:
2189 ar->scan.state = ATH10K_SCAN_IDLE;
2190 ar->scan_channel = NULL;
2191 ath10k_offchan_tx_purge(ar);
2192 cancel_delayed_work(&ar->scan.timeout);
2193 complete_all(&ar->scan.completed);
2194 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002195 }
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002196}
Kalle Valo5e3dd152013-06-12 20:52:10 +03002197
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002198void ath10k_scan_finish(struct ath10k *ar)
2199{
2200 spin_lock_bh(&ar->data_lock);
2201 __ath10k_scan_finish(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002202 spin_unlock_bh(&ar->data_lock);
2203}
2204
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002205static int ath10k_scan_stop(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002206{
2207 struct wmi_stop_scan_arg arg = {
2208 .req_id = 1, /* FIXME */
2209 .req_type = WMI_SCAN_STOP_ONE,
2210 .u.scan_id = ATH10K_SCAN_ID,
2211 };
2212 int ret;
2213
2214 lockdep_assert_held(&ar->conf_mutex);
2215
Kalle Valo5e3dd152013-06-12 20:52:10 +03002216 ret = ath10k_wmi_stop_scan(ar, &arg);
2217 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002218 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002219 goto out;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002220 }
2221
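	/* wait_for_completion_timeout() returns 0 on timeout and the
	 * remaining jiffies (>= 1) on completion; normalize that to
	 * -ETIMEDOUT / 0 below.
	 */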
Kalle Valo5e3dd152013-06-12 20:52:10 +03002222 ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002223 if (ret == 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002224 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002225 ret = -ETIMEDOUT;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002226 } else if (ret > 0) {
2227 ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002228 }
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002229
2230out:
2231 /* Scan state should be updated upon scan completion but in case
2232 * firmware fails to deliver the event (for whatever reason) it is
2233 * desired to clean up scan state anyway. Firmware may have just
2234 * dropped the scan completion event delivery due to transport pipe
 2235	 * being overflowed with data and/or it can recover on its own before
 2236	 * the next scan request is submitted.
2237 */
2238 spin_lock_bh(&ar->data_lock);
2239 if (ar->scan.state != ATH10K_SCAN_IDLE)
2240 __ath10k_scan_finish(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002241 spin_unlock_bh(&ar->data_lock);
2242
2243 return ret;
2244}
2245
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002246static void ath10k_scan_abort(struct ath10k *ar)
2247{
2248 int ret;
2249
2250 lockdep_assert_held(&ar->conf_mutex);
2251
2252 spin_lock_bh(&ar->data_lock);
2253
2254 switch (ar->scan.state) {
2255 case ATH10K_SCAN_IDLE:
2256 /* This can happen if timeout worker kicked in and called
2257 * abortion while scan completion was being processed.
2258 */
2259 break;
2260 case ATH10K_SCAN_STARTING:
2261 case ATH10K_SCAN_ABORTING:
Michal Kazior7aa7a722014-08-25 12:09:38 +02002262 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002263 ath10k_scan_state_str(ar->scan.state),
2264 ar->scan.state);
2265 break;
2266 case ATH10K_SCAN_RUNNING:
2267 ar->scan.state = ATH10K_SCAN_ABORTING;
2268 spin_unlock_bh(&ar->data_lock);
2269
2270 ret = ath10k_scan_stop(ar);
2271 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002272 ath10k_warn(ar, "failed to abort scan: %d\n", ret);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002273
2274 spin_lock_bh(&ar->data_lock);
2275 break;
2276 }
2277
2278 spin_unlock_bh(&ar->data_lock);
2279}
2280
2281void ath10k_scan_timeout_work(struct work_struct *work)
2282{
2283 struct ath10k *ar = container_of(work, struct ath10k,
2284 scan.timeout.work);
2285
2286 mutex_lock(&ar->conf_mutex);
2287 ath10k_scan_abort(ar);
2288 mutex_unlock(&ar->conf_mutex);
2289}
2290
Kalle Valo5e3dd152013-06-12 20:52:10 +03002291static int ath10k_start_scan(struct ath10k *ar,
2292 const struct wmi_start_scan_arg *arg)
2293{
2294 int ret;
2295
2296 lockdep_assert_held(&ar->conf_mutex);
2297
2298 ret = ath10k_wmi_start_scan(ar, arg);
2299 if (ret)
2300 return ret;
2301
Kalle Valo5e3dd152013-06-12 20:52:10 +03002302 ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
2303 if (ret == 0) {
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002304 ret = ath10k_scan_stop(ar);
2305 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002306 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002307
2308 return -ETIMEDOUT;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002309 }
2310
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002311 /* Add a 200ms margin to account for event/command processing */
2312 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
2313 msecs_to_jiffies(arg->max_scan_time+200));
Kalle Valo5e3dd152013-06-12 20:52:10 +03002314 return 0;
2315}
2316
2317/**********************/
2318/* mac80211 callbacks */
2319/**********************/
2320
2321static void ath10k_tx(struct ieee80211_hw *hw,
2322 struct ieee80211_tx_control *control,
2323 struct sk_buff *skb)
2324{
Kalle Valo5e3dd152013-06-12 20:52:10 +03002325 struct ath10k *ar = hw->priv;
Michal Kazior4b604552014-07-21 21:03:09 +03002326 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2327 struct ieee80211_vif *vif = info->control.vif;
2328 struct ieee80211_key_conf *key = info->control.hw_key;
2329 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002330
2331 /* We should disable CCK RATE due to P2P */
2332 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002333 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002334
Michal Kazior4b604552014-07-21 21:03:09 +03002335 ATH10K_SKB_CB(skb)->htt.is_offchan = false;
2336 ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
Michal Kazior2b37c292014-09-02 11:00:22 +03002337 ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002338
Michal Kaziorcf84bd42013-07-16 11:04:54 +02002339 /* it makes no sense to process injected frames like that */
Michal Kazior4b604552014-07-21 21:03:09 +03002340 if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
2341 ath10k_tx_h_nwifi(hw, skb);
2342 ath10k_tx_h_update_wep_key(vif, key, skb);
2343 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
2344 ath10k_tx_h_seq_no(vif, skb);
Michal Kaziorcf84bd42013-07-16 11:04:54 +02002345 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002346
Kalle Valo5e3dd152013-06-12 20:52:10 +03002347 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
2348 spin_lock_bh(&ar->data_lock);
2349 ATH10K_SKB_CB(skb)->htt.is_offchan = true;
Bartosz Markowski5e00d312013-09-26 17:47:12 +02002350 ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002351 spin_unlock_bh(&ar->data_lock);
2352
Michal Kazior7aa7a722014-08-25 12:09:38 +02002353 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
2354 skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002355
2356 skb_queue_tail(&ar->offchan_tx_queue, skb);
2357 ieee80211_queue_work(hw, &ar->offchan_tx_work);
2358 return;
2359 }
2360
2361 ath10k_tx_htt(ar, skb);
2362}
2363
Michal Kaziorbca7baf2014-05-26 12:46:03 +03002364/* Must not be called with conf_mutex held as workers can use that also. */
2365static void ath10k_drain_tx(struct ath10k *ar)
2366{
2367 /* make sure rcu-protected mac80211 tx path itself is drained */
2368 synchronize_net();
2369
2370 ath10k_offchan_tx_purge(ar);
2371 ath10k_mgmt_over_wmi_tx_purge(ar);
2372
2373 cancel_work_sync(&ar->offchan_tx_work);
2374 cancel_work_sync(&ar->wmi_mgmt_tx_work);
2375}
2376
Michal Kazioraffd3212013-07-16 09:54:35 +02002377void ath10k_halt(struct ath10k *ar)
Michal Kazior818bdd12013-07-16 09:38:57 +02002378{
Michal Kaziord9bc4b92014-04-23 19:30:06 +03002379 struct ath10k_vif *arvif;
2380
Michal Kazior818bdd12013-07-16 09:38:57 +02002381 lockdep_assert_held(&ar->conf_mutex);
2382
Michal Kazior19337472014-08-28 12:58:16 +02002383 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
2384 ar->filter_flags = 0;
2385 ar->monitor = false;
2386
2387 if (ar->monitor_started)
Michal Kazior1bbc0972014-04-08 09:45:47 +03002388 ath10k_monitor_stop(ar);
Michal Kazior19337472014-08-28 12:58:16 +02002389
2390 ar->monitor_started = false;
Michal Kazior1bbc0972014-04-08 09:45:47 +03002391
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002392 ath10k_scan_finish(ar);
Michal Kazior818bdd12013-07-16 09:38:57 +02002393 ath10k_peer_cleanup_all(ar);
2394 ath10k_core_stop(ar);
2395 ath10k_hif_power_down(ar);
2396
2397 spin_lock_bh(&ar->data_lock);
Michal Kazior64badcb2014-09-18 11:18:02 +03002398 list_for_each_entry(arvif, &ar->arvifs, list)
2399 ath10k_mac_vif_beacon_cleanup(arvif);
Michal Kazior818bdd12013-07-16 09:38:57 +02002400 spin_unlock_bh(&ar->data_lock);
2401}
2402
Ben Greear46acf7b2014-05-16 17:15:38 +03002403static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
2404{
2405 struct ath10k *ar = hw->priv;
2406
2407 mutex_lock(&ar->conf_mutex);
2408
2409 if (ar->cfg_tx_chainmask) {
2410 *tx_ant = ar->cfg_tx_chainmask;
2411 *rx_ant = ar->cfg_rx_chainmask;
2412 } else {
2413 *tx_ant = ar->supp_tx_chainmask;
2414 *rx_ant = ar->supp_rx_chainmask;
2415 }
2416
2417 mutex_unlock(&ar->conf_mutex);
2418
2419 return 0;
2420}
2421
2422static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
2423{
2424 int ret;
2425
2426 lockdep_assert_held(&ar->conf_mutex);
2427
2428 ar->cfg_tx_chainmask = tx_ant;
2429 ar->cfg_rx_chainmask = rx_ant;
2430
2431 if ((ar->state != ATH10K_STATE_ON) &&
2432 (ar->state != ATH10K_STATE_RESTARTED))
2433 return 0;
2434
2435 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
2436 tx_ant);
2437 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002438 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
Ben Greear46acf7b2014-05-16 17:15:38 +03002439 ret, tx_ant);
2440 return ret;
2441 }
2442
2443 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
2444 rx_ant);
2445 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002446 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
Ben Greear46acf7b2014-05-16 17:15:38 +03002447 ret, rx_ant);
2448 return ret;
2449 }
2450
2451 return 0;
2452}
2453
2454static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
2455{
2456 struct ath10k *ar = hw->priv;
2457 int ret;
2458
2459 mutex_lock(&ar->conf_mutex);
2460 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
2461 mutex_unlock(&ar->conf_mutex);
2462 return ret;
2463}
2464
Kalle Valo5e3dd152013-06-12 20:52:10 +03002465static int ath10k_start(struct ieee80211_hw *hw)
2466{
2467 struct ath10k *ar = hw->priv;
Michal Kazior818bdd12013-07-16 09:38:57 +02002468 int ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002469
Michal Kaziorbca7baf2014-05-26 12:46:03 +03002470 /*
2471 * This makes sense only when restarting hw. It is harmless to call
 2472	 * it unconditionally. This is necessary to make sure no HTT/WMI tx
2473 * commands will be submitted while restarting.
2474 */
2475 ath10k_drain_tx(ar);
2476
Michal Kazior548db542013-07-05 16:15:15 +03002477 mutex_lock(&ar->conf_mutex);
2478
Michal Kaziorc5058f52014-05-26 12:46:03 +03002479 switch (ar->state) {
2480 case ATH10K_STATE_OFF:
2481 ar->state = ATH10K_STATE_ON;
2482 break;
2483 case ATH10K_STATE_RESTARTING:
2484 ath10k_halt(ar);
2485 ar->state = ATH10K_STATE_RESTARTED;
2486 break;
2487 case ATH10K_STATE_ON:
2488 case ATH10K_STATE_RESTARTED:
2489 case ATH10K_STATE_WEDGED:
2490 WARN_ON(1);
Michal Kazior818bdd12013-07-16 09:38:57 +02002491 ret = -EINVAL;
Michal Kaziorae254432014-05-26 12:46:02 +03002492 goto err;
Kalle Valo43d2a302014-09-10 18:23:30 +03002493 case ATH10K_STATE_UTF:
2494 ret = -EBUSY;
2495 goto err;
Michal Kazior818bdd12013-07-16 09:38:57 +02002496 }
2497
2498 ret = ath10k_hif_power_up(ar);
2499 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002500 ath10k_err(ar, "Could not init hif: %d\n", ret);
Michal Kaziorae254432014-05-26 12:46:02 +03002501 goto err_off;
Michal Kazior818bdd12013-07-16 09:38:57 +02002502 }
2503
Kalle Valo43d2a302014-09-10 18:23:30 +03002504 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
Michal Kazior818bdd12013-07-16 09:38:57 +02002505 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002506 ath10k_err(ar, "Could not init core: %d\n", ret);
Michal Kaziorae254432014-05-26 12:46:02 +03002507 goto err_power_down;
Michal Kazior818bdd12013-07-16 09:38:57 +02002508 }
2509
Bartosz Markowski226a3392013-09-26 17:47:16 +02002510 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
Michal Kaziorae254432014-05-26 12:46:02 +03002511 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002512 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
Michal Kaziorae254432014-05-26 12:46:02 +03002513 goto err_core_stop;
2514 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002515
Michal Kaziorc4dd0d02013-11-13 11:05:10 +01002516 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
Michal Kaziorae254432014-05-26 12:46:02 +03002517 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002518 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
Michal Kaziorae254432014-05-26 12:46:02 +03002519 goto err_core_stop;
2520 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002521
Ben Greear46acf7b2014-05-16 17:15:38 +03002522 if (ar->cfg_tx_chainmask)
2523 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
2524 ar->cfg_rx_chainmask);
2525
Marek Puzyniakab6258e2014-01-29 15:03:31 +02002526 /*
 2527	 * By default the FW sets the ARP frame access category to voice (6).
 2528	 * In that case the ARP exchange does not work properly with a UAPSD
 2529	 * enabled AP. ARP requests which arrive with access category 0 are
 2530	 * processed by the network stack and sent back with access category
 2531	 * 0, but the FW changes the access category to 6. Setting the ARP
 2532	 * frame access category to best effort (0) solves this problem.
2533 */
2534
2535 ret = ath10k_wmi_pdev_set_param(ar,
2536 ar->wmi.pdev_param->arp_ac_override, 0);
2537 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002538 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
Marek Puzyniakab6258e2014-01-29 15:03:31 +02002539 ret);
Michal Kaziorae254432014-05-26 12:46:02 +03002540 goto err_core_stop;
Marek Puzyniakab6258e2014-01-29 15:03:31 +02002541 }
2542
Michal Kaziord6500972014-04-08 09:56:09 +03002543 ar->num_started_vdevs = 0;
Michal Kaziorf7843d72013-07-16 09:38:52 +02002544 ath10k_regd_update(ar);
2545
Simon Wunderlich855aed12014-08-02 09:12:54 +03002546 ath10k_spectral_start(ar);
2547
Michal Kaziorae254432014-05-26 12:46:02 +03002548 mutex_unlock(&ar->conf_mutex);
2549 return 0;
2550
2551err_core_stop:
2552 ath10k_core_stop(ar);
2553
2554err_power_down:
2555 ath10k_hif_power_down(ar);
2556
2557err_off:
2558 ar->state = ATH10K_STATE_OFF;
2559
2560err:
Michal Kazior548db542013-07-05 16:15:15 +03002561 mutex_unlock(&ar->conf_mutex);
Michal Kaziorc60bdd82014-01-29 07:26:31 +01002562 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002563}
2564
2565static void ath10k_stop(struct ieee80211_hw *hw)
2566{
2567 struct ath10k *ar = hw->priv;
2568
Michal Kaziorbca7baf2014-05-26 12:46:03 +03002569 ath10k_drain_tx(ar);
2570
Michal Kazior548db542013-07-05 16:15:15 +03002571 mutex_lock(&ar->conf_mutex);
Michal Kaziorc5058f52014-05-26 12:46:03 +03002572 if (ar->state != ATH10K_STATE_OFF) {
Michal Kazior818bdd12013-07-16 09:38:57 +02002573 ath10k_halt(ar);
Michal Kaziorc5058f52014-05-26 12:46:03 +03002574 ar->state = ATH10K_STATE_OFF;
2575 }
Michal Kazior548db542013-07-05 16:15:15 +03002576 mutex_unlock(&ar->conf_mutex);
2577
Michal Kazior5c81c7f2014-08-05 14:54:44 +02002578 cancel_delayed_work_sync(&ar->scan.timeout);
Michal Kazioraffd3212013-07-16 09:54:35 +02002579 cancel_work_sync(&ar->restart_work);
2580}
2581
Michal Kaziorad088bf2013-10-16 15:44:46 +03002582static int ath10k_config_ps(struct ath10k *ar)
Michal Kazioraffd3212013-07-16 09:54:35 +02002583{
Michal Kaziorad088bf2013-10-16 15:44:46 +03002584 struct ath10k_vif *arvif;
2585 int ret = 0;
Michal Kazioraffd3212013-07-16 09:54:35 +02002586
2587 lockdep_assert_held(&ar->conf_mutex);
2588
Michal Kaziorad088bf2013-10-16 15:44:46 +03002589 list_for_each_entry(arvif, &ar->arvifs, list) {
2590 ret = ath10k_mac_vif_setup_ps(arvif);
2591 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002592 ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
Michal Kaziorad088bf2013-10-16 15:44:46 +03002593 break;
2594 }
2595 }
Michal Kazioraffd3212013-07-16 09:54:35 +02002596
Michal Kaziorad088bf2013-10-16 15:44:46 +03002597 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002598}
2599
Michal Kaziorc930f742014-01-23 11:38:25 +01002600static const char *chandef_get_width(enum nl80211_chan_width width)
2601{
2602 switch (width) {
2603 case NL80211_CHAN_WIDTH_20_NOHT:
2604 return "20 (noht)";
2605 case NL80211_CHAN_WIDTH_20:
2606 return "20";
2607 case NL80211_CHAN_WIDTH_40:
2608 return "40";
2609 case NL80211_CHAN_WIDTH_80:
2610 return "80";
2611 case NL80211_CHAN_WIDTH_80P80:
2612 return "80+80";
2613 case NL80211_CHAN_WIDTH_160:
2614 return "160";
2615 case NL80211_CHAN_WIDTH_5:
2616 return "5";
2617 case NL80211_CHAN_WIDTH_10:
2618 return "10";
2619 }
2620 return "?";
2621}
2622
2623static void ath10k_config_chan(struct ath10k *ar)
2624{
2625 struct ath10k_vif *arvif;
Michal Kaziorc930f742014-01-23 11:38:25 +01002626 int ret;
2627
2628 lockdep_assert_held(&ar->conf_mutex);
2629
Michal Kazior7aa7a722014-08-25 12:09:38 +02002630 ath10k_dbg(ar, ATH10K_DBG_MAC,
Michal Kaziorc930f742014-01-23 11:38:25 +01002631 "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
2632 ar->chandef.chan->center_freq,
2633 ar->chandef.center_freq1,
2634 ar->chandef.center_freq2,
2635 chandef_get_width(ar->chandef.width));
2636
2637 /* First stop monitor interface. Some FW versions crash if there's a
2638 * lone monitor interface. */
Michal Kazior1bbc0972014-04-08 09:45:47 +03002639 if (ar->monitor_started)
Michal Kazior19337472014-08-28 12:58:16 +02002640 ath10k_monitor_stop(ar);
Michal Kaziorc930f742014-01-23 11:38:25 +01002641
2642 list_for_each_entry(arvif, &ar->arvifs, list) {
2643 if (!arvif->is_started)
2644 continue;
2645
Michal Kaziordc55e302014-07-29 12:53:36 +03002646 if (!arvif->is_up)
2647 continue;
2648
Michal Kaziorc930f742014-01-23 11:38:25 +01002649 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2650 continue;
2651
Michal Kaziordc55e302014-07-29 12:53:36 +03002652 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
Michal Kaziorc930f742014-01-23 11:38:25 +01002653 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002654 ath10k_warn(ar, "failed to down vdev %d: %d\n",
Michal Kaziorc930f742014-01-23 11:38:25 +01002655 arvif->vdev_id, ret);
2656 continue;
2657 }
2658 }
2659
Michal Kaziordc55e302014-07-29 12:53:36 +03002660 /* all vdevs are downed now - attempt to restart and re-up them */
Michal Kaziorc930f742014-01-23 11:38:25 +01002661
2662 list_for_each_entry(arvif, &ar->arvifs, list) {
2663 if (!arvif->is_started)
2664 continue;
2665
2666 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2667 continue;
2668
Michal Kaziordc55e302014-07-29 12:53:36 +03002669 ret = ath10k_vdev_restart(arvif);
Michal Kaziorc930f742014-01-23 11:38:25 +01002670 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002671 ath10k_warn(ar, "failed to restart vdev %d: %d\n",
Michal Kaziorc930f742014-01-23 11:38:25 +01002672 arvif->vdev_id, ret);
2673 continue;
2674 }
2675
2676 if (!arvif->is_up)
2677 continue;
2678
2679 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
2680 arvif->bssid);
2681 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002682 ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
Michal Kaziorc930f742014-01-23 11:38:25 +01002683 arvif->vdev_id, ret);
2684 continue;
2685 }
2686 }
2687
Michal Kazior19337472014-08-28 12:58:16 +02002688 ath10k_monitor_recalc(ar);
Michal Kaziorc930f742014-01-23 11:38:25 +01002689}
2690
Kalle Valo5e3dd152013-06-12 20:52:10 +03002691static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2692{
Kalle Valo5e3dd152013-06-12 20:52:10 +03002693 struct ath10k *ar = hw->priv;
2694 struct ieee80211_conf *conf = &hw->conf;
2695 int ret = 0;
Michal Kazior5474efe2013-10-23 04:02:15 -07002696 u32 param;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002697
2698 mutex_lock(&ar->conf_mutex);
2699
2700 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002701 ath10k_dbg(ar, ATH10K_DBG_MAC,
Michal Kaziord6500972014-04-08 09:56:09 +03002702 "mac config channel %dMHz flags 0x%x radar %d\n",
Marek Puzyniake8a50f82013-11-20 09:59:47 +02002703 conf->chandef.chan->center_freq,
Michal Kaziord6500972014-04-08 09:56:09 +03002704 conf->chandef.chan->flags,
2705 conf->radar_enabled);
Marek Puzyniake8a50f82013-11-20 09:59:47 +02002706
Kalle Valo5e3dd152013-06-12 20:52:10 +03002707 spin_lock_bh(&ar->data_lock);
2708 ar->rx_channel = conf->chandef.chan;
2709 spin_unlock_bh(&ar->data_lock);
Marek Puzyniake8a50f82013-11-20 09:59:47 +02002710
Michal Kaziord6500972014-04-08 09:56:09 +03002711 ar->radar_enabled = conf->radar_enabled;
2712 ath10k_recalc_radar_detection(ar);
Michal Kaziorc930f742014-01-23 11:38:25 +01002713
2714 if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
2715 ar->chandef = conf->chandef;
2716 ath10k_config_chan(ar);
2717 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002718 }
2719
Michal Kazior5474efe2013-10-23 04:02:15 -07002720 if (changed & IEEE80211_CONF_CHANGE_POWER) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002721 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n",
Michal Kazior5474efe2013-10-23 04:02:15 -07002722 hw->conf.power_level);
2723
2724 param = ar->wmi.pdev_param->txpower_limit2g;
2725 ret = ath10k_wmi_pdev_set_param(ar, param,
2726 hw->conf.power_level * 2);
2727 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002728 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
Michal Kazior5474efe2013-10-23 04:02:15 -07002729 hw->conf.power_level, ret);
2730
2731 param = ar->wmi.pdev_param->txpower_limit5g;
2732 ret = ath10k_wmi_pdev_set_param(ar, param,
2733 hw->conf.power_level * 2);
2734 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002735 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
Michal Kazior5474efe2013-10-23 04:02:15 -07002736 hw->conf.power_level, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002737 }
2738
Michal Kazioraffd3212013-07-16 09:54:35 +02002739 if (changed & IEEE80211_CONF_CHANGE_PS)
2740 ath10k_config_ps(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002741
2742 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
Michal Kazior19337472014-08-28 12:58:16 +02002743 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
2744 ret = ath10k_monitor_recalc(ar);
2745 if (ret)
2746 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002747 }
2748
2749 mutex_unlock(&ar->conf_mutex);
2750 return ret;
2751}
2752
2753/*
2754 * TODO:
2755 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
2756 * because we will send mgmt frames without CCK. This requirement
2757 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
2758 * in the TX packet.
2759 */
2760static int ath10k_add_interface(struct ieee80211_hw *hw,
2761 struct ieee80211_vif *vif)
2762{
2763 struct ath10k *ar = hw->priv;
2764 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2765 enum wmi_sta_powersave_param param;
2766 int ret = 0;
Kalle Valo5a13e762014-01-20 11:01:46 +02002767 u32 value;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002768 int bit;
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02002769 u32 vdev_param;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002770
2771 mutex_lock(&ar->conf_mutex);
2772
Michal Kazior0dbd09e2013-07-31 10:55:14 +02002773 memset(arvif, 0, sizeof(*arvif));
2774
Kalle Valo5e3dd152013-06-12 20:52:10 +03002775 arvif->ar = ar;
2776 arvif->vif = vif;
2777
Michal Kaziorcc4827b2013-10-16 15:44:45 +03002778 INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
Ben Greeare63b33f2013-10-22 14:54:14 -07002779 INIT_LIST_HEAD(&arvif->list);
Michal Kaziorcc4827b2013-10-16 15:44:45 +03002780
Ben Greeara9aefb32014-08-12 11:02:19 +03002781 if (ar->free_vdev_map == 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002782 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002783 ret = -EBUSY;
Michal Kazior9dad14a2013-10-16 15:44:45 +03002784 goto err;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002785 }
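	/* free_vdev_map is a bitmask of unused vdev ids; grab the lowest
	 * free bit for this interface. The bit is cleared below once the
	 * WMI vdev create succeeds.
	 */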
Ben Greear16c11172014-09-23 14:17:16 -07002786 bit = __ffs64(ar->free_vdev_map);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002787
Ben Greear16c11172014-09-23 14:17:16 -07002788 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
2789 bit, ar->free_vdev_map);
2790
2791 arvif->vdev_id = bit;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002792 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002793
2794 if (ar->p2p)
2795 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
2796
2797 switch (vif->type) {
2798 case NL80211_IFTYPE_UNSPECIFIED:
2799 case NL80211_IFTYPE_STATION:
2800 arvif->vdev_type = WMI_VDEV_TYPE_STA;
2801 if (vif->p2p)
2802 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
2803 break;
2804 case NL80211_IFTYPE_ADHOC:
2805 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
2806 break;
2807 case NL80211_IFTYPE_AP:
2808 arvif->vdev_type = WMI_VDEV_TYPE_AP;
2809
2810 if (vif->p2p)
2811 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
2812 break;
2813 case NL80211_IFTYPE_MONITOR:
2814 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
2815 break;
2816 default:
2817 WARN_ON(1);
2818 break;
2819 }
2820
Michal Kazior64badcb2014-09-18 11:18:02 +03002821 /* Some firmware revisions don't wait for beacon tx completion before
2822 * sending another SWBA event. This could lead to hardware using old
2823 * (freed) beacon data in some cases, e.g. tx credit starvation
2824 * combined with missed TBTT. This is very very rare.
2825 *
2826 * On non-IOMMU-enabled hosts this could be a possible security issue
2827 * because hw could beacon some random data on the air. On
2828 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
2829 * device would crash.
2830 *
 2831	 * Since there are no beacon tx completions (neither implicit nor
 2832	 * explicit) propagated to the host, the only workaround is to allocate
 2833	 * a DMA-coherent buffer for the lifetime of a vif and use it for all
2834 * beacon tx commands. Worst case for this approach is some beacons may
2835 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
2836 */
2837 if (vif->type == NL80211_IFTYPE_ADHOC ||
2838 vif->type == NL80211_IFTYPE_AP) {
2839 arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
2840 IEEE80211_MAX_FRAME_LEN,
2841 &arvif->beacon_paddr,
Rajkumar Manoharan82d7aba2014-10-10 17:38:27 +05302842 GFP_ATOMIC);
Michal Kazior64badcb2014-09-18 11:18:02 +03002843 if (!arvif->beacon_buf) {
2844 ret = -ENOMEM;
2845 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
2846 ret);
2847 goto err;
2848 }
2849 }
2850
2851 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
2852 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
2853 arvif->beacon_buf ? "single-buf" : "per-skb");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002854
2855 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
2856 arvif->vdev_subtype, vif->addr);
2857 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002858 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002859 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002860 goto err;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002861 }
2862
Ben Greear16c11172014-09-23 14:17:16 -07002863 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
Michal Kazior05791192013-10-16 15:44:45 +03002864 list_add(&arvif->list, &ar->arvifs);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002865
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02002866 vdev_param = ar->wmi.vdev_param->def_keyid;
2867 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
Michal Kaziorcc4827b2013-10-16 15:44:45 +03002868 arvif->def_wep_key_idx);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002869 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002870 ath10k_warn(ar, "failed to set vdev %i default key id: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002871 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002872 goto err_vdev_delete;
2873 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002874
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02002875 vdev_param = ar->wmi.vdev_param->tx_encap_type;
2876 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002877 ATH10K_HW_TXRX_NATIVE_WIFI);
Bartosz Markowskiebc9abd2013-10-15 09:26:20 +02002878 /* 10.X firmware does not support this VDEV parameter. Do not warn */
Michal Kazior9dad14a2013-10-16 15:44:45 +03002879 if (ret && ret != -EOPNOTSUPP) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002880 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002881 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002882 goto err_vdev_delete;
2883 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002884
2885 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2886 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
2887 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002888 ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002889 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002890 goto err_vdev_delete;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002891 }
Marek Puzyniakcdf07402013-12-30 09:07:51 +01002892
Kalle Valo5a13e762014-01-20 11:01:46 +02002893 ret = ath10k_mac_set_kickout(arvif);
2894 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002895 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002896 arvif->vdev_id, ret);
Kalle Valo5a13e762014-01-20 11:01:46 +02002897 goto err_peer_delete;
2898 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002899 }
2900
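	/* For station vdevs program the initial power-save parameters:
	 * wake for rx, always wake for tx and no PS-Poll limit. These are
	 * only the defaults applied at vdev creation time. */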
2901 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
2902 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
2903 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
2904 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2905 param, value);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002906 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002907 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002908 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002909 goto err_peer_delete;
2910 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002911
2912 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
2913 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
2914 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2915 param, value);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002916 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002917 ath10k_warn(ar, "failed to set vdev %i TX wake thresh: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002918 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002919 goto err_peer_delete;
2920 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002921
2922 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
2923 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
2924 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2925 param, value);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002926 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002927 ath10k_warn(ar, "failed to set vdev %i PSPOLL count: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002928 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002929 goto err_peer_delete;
2930 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002931 }
2932
Michal Kazior424121c2013-07-22 14:13:31 +02002933 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002934 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002935 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
Michal Kazior679c54a2013-07-05 16:15:04 +03002936 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002937 goto err_peer_delete;
2938 }
Michal Kazior679c54a2013-07-05 16:15:04 +03002939
Michal Kazior424121c2013-07-22 14:13:31 +02002940 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002941 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002942 ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
Michal Kazior679c54a2013-07-05 16:15:04 +03002943 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002944 goto err_peer_delete;
2945 }
Michal Kazior679c54a2013-07-05 16:15:04 +03002946
Kalle Valo5e3dd152013-06-12 20:52:10 +03002947 mutex_unlock(&ar->conf_mutex);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002948 return 0;
2949
2950err_peer_delete:
2951 if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
2952 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
2953
2954err_vdev_delete:
2955 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
Ben Greear16c11172014-09-23 14:17:16 -07002956 ar->free_vdev_map |= 1LL << arvif->vdev_id;
Michal Kazior05791192013-10-16 15:44:45 +03002957 list_del(&arvif->list);
Michal Kazior9dad14a2013-10-16 15:44:45 +03002958
2959err:
Michal Kazior64badcb2014-09-18 11:18:02 +03002960 if (arvif->beacon_buf) {
2961 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
2962 arvif->beacon_buf, arvif->beacon_paddr);
2963 arvif->beacon_buf = NULL;
2964 }
2965
Michal Kazior9dad14a2013-10-16 15:44:45 +03002966 mutex_unlock(&ar->conf_mutex);
2967
Kalle Valo5e3dd152013-06-12 20:52:10 +03002968 return ret;
2969}
2970
2971static void ath10k_remove_interface(struct ieee80211_hw *hw,
2972 struct ieee80211_vif *vif)
2973{
2974 struct ath10k *ar = hw->priv;
2975 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2976 int ret;
2977
2978 mutex_lock(&ar->conf_mutex);
2979
Michal Kaziorcc4827b2013-10-16 15:44:45 +03002980 cancel_work_sync(&arvif->wep_key_work);
2981
Michal Kaziored543882013-09-13 14:16:56 +02002982 spin_lock_bh(&ar->data_lock);
Michal Kazior64badcb2014-09-18 11:18:02 +03002983 ath10k_mac_vif_beacon_cleanup(arvif);
Michal Kaziored543882013-09-13 14:16:56 +02002984 spin_unlock_bh(&ar->data_lock);
2985
Simon Wunderlich855aed12014-08-02 09:12:54 +03002986 ret = ath10k_spectral_vif_stop(arvif);
2987 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002988 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
Simon Wunderlich855aed12014-08-02 09:12:54 +03002989 arvif->vdev_id, ret);
2990
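	/* Return the vdev id to the free pool and unlink the vif before the
	 * firmware-side peer and vdev are torn down below. */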
Ben Greear16c11172014-09-23 14:17:16 -07002991 ar->free_vdev_map |= 1LL << arvif->vdev_id;
Michal Kazior05791192013-10-16 15:44:45 +03002992 list_del(&arvif->list);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002993
2994 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2995 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
2996 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002997 ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002998 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002999
3000 kfree(arvif->u.ap.noa_data);
3001 }
3002
Michal Kazior7aa7a722014-08-25 12:09:38 +02003003 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03003004 arvif->vdev_id);
3005
Kalle Valo5e3dd152013-06-12 20:52:10 +03003006 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
3007 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003008 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02003009 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003010
Kalle Valo5e3dd152013-06-12 20:52:10 +03003011 ath10k_peer_cleanup(ar, arvif->vdev_id);
3012
3013 mutex_unlock(&ar->conf_mutex);
3014}
3015
3016/*
3017 * FIXME: Has to be verified.
3018 */
3019#define SUPPORTED_FILTERS \
3020 (FIF_PROMISC_IN_BSS | \
3021 FIF_ALLMULTI | \
3022 FIF_CONTROL | \
3023 FIF_PSPOLL | \
3024 FIF_OTHER_BSS | \
3025 FIF_BCN_PRBRESP_PROMISC | \
3026 FIF_PROBE_REQ | \
3027 FIF_FCSFAIL)
3028
3029static void ath10k_configure_filter(struct ieee80211_hw *hw,
3030 unsigned int changed_flags,
3031 unsigned int *total_flags,
3032 u64 multicast)
3033{
3034 struct ath10k *ar = hw->priv;
3035 int ret;
3036
3037 mutex_lock(&ar->conf_mutex);
3038
3039 changed_flags &= SUPPORTED_FILTERS;
3040 *total_flags &= SUPPORTED_FILTERS;
3041 ar->filter_flags = *total_flags;
3042
Michal Kazior19337472014-08-28 12:58:16 +02003043 ret = ath10k_monitor_recalc(ar);
3044 if (ret)
 3045		ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003046
3047 mutex_unlock(&ar->conf_mutex);
3048}
3049
3050static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
3051 struct ieee80211_vif *vif,
3052 struct ieee80211_bss_conf *info,
3053 u32 changed)
3054{
3055 struct ath10k *ar = hw->priv;
3056 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3057 int ret = 0;
Kalle Valoaf762c02014-09-14 12:50:17 +03003058 u32 vdev_param, pdev_param, slottime, preamble;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003059
3060 mutex_lock(&ar->conf_mutex);
3061
3062 if (changed & BSS_CHANGED_IBSS)
3063 ath10k_control_ibss(arvif, info, vif->addr);
3064
3065 if (changed & BSS_CHANGED_BEACON_INT) {
3066 arvif->beacon_interval = info->beacon_int;
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02003067 vdev_param = ar->wmi.vdev_param->beacon_interval;
3068 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03003069 arvif->beacon_interval);
Michal Kazior7aa7a722014-08-25 12:09:38 +02003070 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03003071 "mac vdev %d beacon_interval %d\n",
3072 arvif->vdev_id, arvif->beacon_interval);
3073
Kalle Valo5e3dd152013-06-12 20:52:10 +03003074 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003075 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02003076 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003077 }
3078
3079 if (changed & BSS_CHANGED_BEACON) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003080 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03003081 "vdev %d set beacon tx mode to staggered\n",
3082 arvif->vdev_id);
3083
Bartosz Markowski226a3392013-09-26 17:47:16 +02003084 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
3085 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03003086 WMI_BEACON_STAGGERED_MODE);
3087 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003088 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02003089 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003090 }
3091
John W. Linvilleb70727e2013-06-13 13:34:29 -04003092 if (changed & BSS_CHANGED_BEACON_INFO) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03003093 arvif->dtim_period = info->dtim_period;
3094
Michal Kazior7aa7a722014-08-25 12:09:38 +02003095 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03003096 "mac vdev %d dtim_period %d\n",
3097 arvif->vdev_id, arvif->dtim_period);
3098
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02003099 vdev_param = ar->wmi.vdev_param->dtim_period;
3100 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03003101 arvif->dtim_period);
3102 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003103 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02003104 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003105 }
3106
3107 if (changed & BSS_CHANGED_SSID &&
3108 vif->type == NL80211_IFTYPE_AP) {
3109 arvif->u.ap.ssid_len = info->ssid_len;
3110 if (info->ssid_len)
3111 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
3112 arvif->u.ap.hidden_ssid = info->hidden_ssid;
3113 }
3114
Michal Kazior7b161a72014-05-26 12:46:03 +03003115 /*
3116 * Firmware manages AP self-peer internally so make sure to not create
3117 * it in driver. Otherwise AP self-peer deletion may timeout later.
3118 */
3119 if (changed & BSS_CHANGED_BSSID &&
3120 vif->type != NL80211_IFTYPE_AP) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03003121 if (!is_zero_ether_addr(info->bssid)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03003122 if (vif->type == NL80211_IFTYPE_STATION) {
Janusz Dziedzic3c7984e2014-10-02 13:56:40 +02003123 ath10k_dbg(ar, ATH10K_DBG_MAC,
3124 "mac vdev %d create peer %pM\n",
3125 arvif->vdev_id, info->bssid);
3126
3127 ret = ath10k_peer_create(ar, arvif->vdev_id,
3128 info->bssid);
3129 if (ret)
3130 ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n",
3131 info->bssid, arvif->vdev_id,
3132 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003133 /*
 3134			 * this is never erased as we use it for crypto key
 3135			 * clearing; this is a FW requirement
3136 */
Kalle Valob25f32c2014-09-14 12:50:49 +03003137 ether_addr_copy(arvif->bssid, info->bssid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003138
Michal Kazior7aa7a722014-08-25 12:09:38 +02003139 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03003140 "mac vdev %d start %pM\n",
3141 arvif->vdev_id, info->bssid);
3142
Kalle Valo5e3dd152013-06-12 20:52:10 +03003143 ret = ath10k_vdev_start(arvif);
Michal Kaziorc930f742014-01-23 11:38:25 +01003144 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003145 ath10k_warn(ar, "failed to start vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02003146 arvif->vdev_id, ret);
Kalle Valo75459e32014-02-13 18:13:12 +02003147 goto exit;
Michal Kaziorc930f742014-01-23 11:38:25 +01003148 }
3149
3150 arvif->is_started = true;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003151 }
3152
3153 /*
3154 * Mac80211 does not keep IBSS bssid when leaving IBSS,
 3155		 * so the driver needs to store it. It is needed when leaving
3156 * IBSS in order to remove BSSID peer.
3157 */
3158 if (vif->type == NL80211_IFTYPE_ADHOC)
Michal Kaziorc930f742014-01-23 11:38:25 +01003159 memcpy(arvif->bssid, info->bssid,
Kalle Valo5e3dd152013-06-12 20:52:10 +03003160 ETH_ALEN);
3161 }
3162 }
3163
3164 if (changed & BSS_CHANGED_BEACON_ENABLED)
3165 ath10k_control_beaconing(arvif, info);
3166
3167 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02003168 arvif->use_cts_prot = info->use_cts_prot;
Michal Kazior7aa7a722014-08-25 12:09:38 +02003169 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02003170 arvif->vdev_id, info->use_cts_prot);
Kalle Valo60c3daa2013-09-08 17:56:07 +03003171
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02003172 ret = ath10k_recalc_rtscts_prot(arvif);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003173 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003174 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02003175 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003176 }
3177
3178 if (changed & BSS_CHANGED_ERP_SLOT) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03003179 if (info->use_short_slot)
3180 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
3181
3182 else
3183 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
3184
Michal Kazior7aa7a722014-08-25 12:09:38 +02003185 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03003186 arvif->vdev_id, slottime);
3187
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02003188 vdev_param = ar->wmi.vdev_param->slot_time;
3189 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03003190 slottime);
3191 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003192 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02003193 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003194 }
3195
3196 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03003197 if (info->use_short_preamble)
3198 preamble = WMI_VDEV_PREAMBLE_SHORT;
3199 else
3200 preamble = WMI_VDEV_PREAMBLE_LONG;
3201
Michal Kazior7aa7a722014-08-25 12:09:38 +02003202 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03003203			 "mac vdev %d preamble %d\n",
3204 arvif->vdev_id, preamble);
3205
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02003206 vdev_param = ar->wmi.vdev_param->preamble;
3207 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03003208 preamble);
3209 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003210 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02003211 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003212 }
3213
3214 if (changed & BSS_CHANGED_ASSOC) {
Michal Kaziore556f112014-08-28 12:58:17 +02003215 if (info->assoc) {
3216 /* Workaround: Make sure monitor vdev is not running
3217 * when associating to prevent some firmware revisions
3218 * (e.g. 10.1 and 10.2) from crashing.
3219 */
3220 if (ar->monitor_started)
3221 ath10k_monitor_stop(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003222 ath10k_bss_assoc(hw, vif, info);
Michal Kaziore556f112014-08-28 12:58:17 +02003223 ath10k_monitor_recalc(ar);
3224 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03003225 }
3226
Kalle Valo75459e32014-02-13 18:13:12 +02003227exit:
Kalle Valo5e3dd152013-06-12 20:52:10 +03003228 mutex_unlock(&ar->conf_mutex);
3229}
3230
3231static int ath10k_hw_scan(struct ieee80211_hw *hw,
3232 struct ieee80211_vif *vif,
David Spinadelc56ef672014-02-05 15:21:13 +02003233 struct ieee80211_scan_request *hw_req)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003234{
3235 struct ath10k *ar = hw->priv;
3236 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
David Spinadelc56ef672014-02-05 15:21:13 +02003237 struct cfg80211_scan_request *req = &hw_req->req;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003238 struct wmi_start_scan_arg arg;
3239 int ret = 0;
3240 int i;
3241
3242 mutex_lock(&ar->conf_mutex);
3243
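	/* Only one scan or remain-on-channel operation can be in flight at a
	 * time; claim the scan state machine under data_lock or bail out
	 * with -EBUSY if it is already busy. */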
3244 spin_lock_bh(&ar->data_lock);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003245 switch (ar->scan.state) {
3246 case ATH10K_SCAN_IDLE:
3247 reinit_completion(&ar->scan.started);
3248 reinit_completion(&ar->scan.completed);
3249 ar->scan.state = ATH10K_SCAN_STARTING;
3250 ar->scan.is_roc = false;
3251 ar->scan.vdev_id = arvif->vdev_id;
3252 ret = 0;
3253 break;
3254 case ATH10K_SCAN_STARTING:
3255 case ATH10K_SCAN_RUNNING:
3256 case ATH10K_SCAN_ABORTING:
Kalle Valo5e3dd152013-06-12 20:52:10 +03003257 ret = -EBUSY;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003258 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003259 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03003260 spin_unlock_bh(&ar->data_lock);
3261
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003262 if (ret)
3263 goto exit;
3264
Kalle Valo5e3dd152013-06-12 20:52:10 +03003265 memset(&arg, 0, sizeof(arg));
3266 ath10k_wmi_start_scan_init(ar, &arg);
3267 arg.vdev_id = arvif->vdev_id;
3268 arg.scan_id = ATH10K_SCAN_ID;
3269
3270 if (!req->no_cck)
3271 arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
3272
3273 if (req->ie_len) {
3274 arg.ie_len = req->ie_len;
3275 memcpy(arg.ie, req->ie, arg.ie_len);
3276 }
3277
3278 if (req->n_ssids) {
3279 arg.n_ssids = req->n_ssids;
3280 for (i = 0; i < arg.n_ssids; i++) {
3281 arg.ssids[i].len = req->ssids[i].ssid_len;
3282 arg.ssids[i].ssid = req->ssids[i].ssid;
3283 }
Michal Kaziordcd4a562013-07-31 10:55:12 +02003284 } else {
3285 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003286 }
3287
3288 if (req->n_channels) {
3289 arg.n_channels = req->n_channels;
3290 for (i = 0; i < arg.n_channels; i++)
3291 arg.channels[i] = req->channels[i]->center_freq;
3292 }
3293
3294 ret = ath10k_start_scan(ar, &arg);
3295 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003296 ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003297 spin_lock_bh(&ar->data_lock);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003298 ar->scan.state = ATH10K_SCAN_IDLE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003299 spin_unlock_bh(&ar->data_lock);
3300 }
3301
3302exit:
3303 mutex_unlock(&ar->conf_mutex);
3304 return ret;
3305}
3306
3307static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
3308 struct ieee80211_vif *vif)
3309{
3310 struct ath10k *ar = hw->priv;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003311
3312 mutex_lock(&ar->conf_mutex);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003313 cancel_delayed_work_sync(&ar->scan.timeout);
3314 ath10k_scan_abort(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003315 mutex_unlock(&ar->conf_mutex);
3316}
3317
Michal Kaziorcfb27d22013-12-02 09:06:36 +01003318static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
3319 struct ath10k_vif *arvif,
3320 enum set_key_cmd cmd,
3321 struct ieee80211_key_conf *key)
3322{
3323 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
3324 int ret;
3325
3326 /* 10.1 firmware branch requires default key index to be set to group
3327 * key index after installing it. Otherwise FW/HW Txes corrupted
3328 * frames with multi-vif APs. This is not required for main firmware
3329 * branch (e.g. 636).
3330 *
 3331	 * FIXME: This has been tested only in AP mode. It remains unknown if this
3332 * is required for multi-vif STA interfaces on 10.1 */
3333
3334 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
3335 return;
3336
3337 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
3338 return;
3339
3340 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
3341 return;
3342
3343 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
3344 return;
3345
3346 if (cmd != SET_KEY)
3347 return;
3348
3349 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
3350 key->keyidx);
3351 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003352 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02003353 arvif->vdev_id, ret);
Michal Kaziorcfb27d22013-12-02 09:06:36 +01003354}
3355
Kalle Valo5e3dd152013-06-12 20:52:10 +03003356static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3357 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
3358 struct ieee80211_key_conf *key)
3359{
3360 struct ath10k *ar = hw->priv;
3361 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3362 struct ath10k_peer *peer;
3363 const u8 *peer_addr;
3364 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3365 key->cipher == WLAN_CIPHER_SUITE_WEP104;
3366 int ret = 0;
3367
3368 if (key->keyidx > WMI_MAX_KEY_INDEX)
3369 return -ENOSPC;
3370
3371 mutex_lock(&ar->conf_mutex);
3372
3373 if (sta)
3374 peer_addr = sta->addr;
3375 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
3376 peer_addr = vif->bss_conf.bssid;
3377 else
3378 peer_addr = vif->addr;
3379
3380 key->hw_key_idx = key->keyidx;
3381
 3382	/* the peer should not disappear midway (unless FW goes awry) since
 3383	 * we already hold conf_mutex; we just make sure it's there now. */
3384 spin_lock_bh(&ar->data_lock);
3385 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
3386 spin_unlock_bh(&ar->data_lock);
3387
3388 if (!peer) {
3389 if (cmd == SET_KEY) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003390 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03003391 peer_addr);
3392 ret = -EOPNOTSUPP;
3393 goto exit;
3394 } else {
3395 /* if the peer doesn't exist there is no key to disable
3396 * anymore */
3397 goto exit;
3398 }
3399 }
3400
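	/* WEP keys are additionally cached in arvif->wep_keys so the per-vif
	 * default tx key index can be managed; drop the cached entry (and
	 * the vdev key state via ath10k_clear_vdev_key) on DISABLE_KEY. */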
3401 if (is_wep) {
3402 if (cmd == SET_KEY)
3403 arvif->wep_keys[key->keyidx] = key;
3404 else
3405 arvif->wep_keys[key->keyidx] = NULL;
3406
3407 if (cmd == DISABLE_KEY)
3408 ath10k_clear_vdev_key(arvif, key);
3409 }
3410
3411 ret = ath10k_install_key(arvif, key, cmd, peer_addr);
3412 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003413 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02003414 arvif->vdev_id, peer_addr, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003415 goto exit;
3416 }
3417
Michal Kaziorcfb27d22013-12-02 09:06:36 +01003418 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
3419
Kalle Valo5e3dd152013-06-12 20:52:10 +03003420 spin_lock_bh(&ar->data_lock);
3421 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
3422 if (peer && cmd == SET_KEY)
3423 peer->keys[key->keyidx] = key;
3424 else if (peer && cmd == DISABLE_KEY)
3425 peer->keys[key->keyidx] = NULL;
3426 else if (peer == NULL)
3427 /* impossible unless FW goes crazy */
Michal Kazior7aa7a722014-08-25 12:09:38 +02003428 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003429 spin_unlock_bh(&ar->data_lock);
3430
3431exit:
3432 mutex_unlock(&ar->conf_mutex);
3433 return ret;
3434}
3435
Michal Kazior9797feb2014-02-14 14:49:48 +01003436static void ath10k_sta_rc_update_wk(struct work_struct *wk)
3437{
3438 struct ath10k *ar;
3439 struct ath10k_vif *arvif;
3440 struct ath10k_sta *arsta;
3441 struct ieee80211_sta *sta;
3442 u32 changed, bw, nss, smps;
3443 int err;
3444
3445 arsta = container_of(wk, struct ath10k_sta, update_wk);
3446 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
3447 arvif = arsta->arvif;
3448 ar = arvif->ar;
3449
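	/* Snapshot the pending rate-control changes under data_lock; the WMI
	 * peer updates below are then issued with only conf_mutex held. */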
3450 spin_lock_bh(&ar->data_lock);
3451
3452 changed = arsta->changed;
3453 arsta->changed = 0;
3454
3455 bw = arsta->bw;
3456 nss = arsta->nss;
3457 smps = arsta->smps;
3458
3459 spin_unlock_bh(&ar->data_lock);
3460
3461 mutex_lock(&ar->conf_mutex);
3462
3463 if (changed & IEEE80211_RC_BW_CHANGED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003464 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01003465 sta->addr, bw);
3466
3467 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
3468 WMI_PEER_CHAN_WIDTH, bw);
3469 if (err)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003470 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01003471 sta->addr, bw, err);
3472 }
3473
3474 if (changed & IEEE80211_RC_NSS_CHANGED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003475 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01003476 sta->addr, nss);
3477
3478 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
3479 WMI_PEER_NSS, nss);
3480 if (err)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003481 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01003482 sta->addr, nss, err);
3483 }
3484
3485 if (changed & IEEE80211_RC_SMPS_CHANGED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003486 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01003487 sta->addr, smps);
3488
3489 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
3490 WMI_PEER_SMPS_STATE, smps);
3491 if (err)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003492 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01003493 sta->addr, smps, err);
3494 }
3495
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02003496 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003497 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02003498 sta->addr);
3499
3500 err = ath10k_station_assoc(ar, arvif, sta, true);
3501 if (err)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003502 ath10k_warn(ar, "failed to reassociate station: %pM\n",
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02003503 sta->addr);
3504 }
3505
Michal Kazior9797feb2014-02-14 14:49:48 +01003506 mutex_unlock(&ar->conf_mutex);
3507}
3508
Kalle Valo5e3dd152013-06-12 20:52:10 +03003509static int ath10k_sta_state(struct ieee80211_hw *hw,
3510 struct ieee80211_vif *vif,
3511 struct ieee80211_sta *sta,
3512 enum ieee80211_sta_state old_state,
3513 enum ieee80211_sta_state new_state)
3514{
3515 struct ath10k *ar = hw->priv;
3516 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior9797feb2014-02-14 14:49:48 +01003517 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
Bartosz Markowski0e759f32014-01-02 14:38:33 +01003518 int max_num_peers;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003519 int ret = 0;
3520
Michal Kazior76f90022014-02-25 09:29:57 +02003521 if (old_state == IEEE80211_STA_NOTEXIST &&
3522 new_state == IEEE80211_STA_NONE) {
3523 memset(arsta, 0, sizeof(*arsta));
3524 arsta->arvif = arvif;
3525 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
3526 }
3527
Michal Kazior9797feb2014-02-14 14:49:48 +01003528 /* cancel must be done outside the mutex to avoid deadlock */
3529 if ((old_state == IEEE80211_STA_NONE &&
3530 new_state == IEEE80211_STA_NOTEXIST))
3531 cancel_work_sync(&arsta->update_wk);
3532
Kalle Valo5e3dd152013-06-12 20:52:10 +03003533 mutex_lock(&ar->conf_mutex);
3534
3535 if (old_state == IEEE80211_STA_NOTEXIST &&
3536 new_state == IEEE80211_STA_NONE &&
3537 vif->type != NL80211_IFTYPE_STATION) {
3538 /*
3539 * New station addition.
3540 */
Bartosz Markowski0e759f32014-01-02 14:38:33 +01003541 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
3542 max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1;
3543 else
3544 max_num_peers = TARGET_NUM_PEERS;
3545
3546 if (ar->num_peers >= max_num_peers) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003547 ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n",
Bartosz Markowski0e759f32014-01-02 14:38:33 +01003548 ar->num_peers, max_num_peers);
3549 ret = -ENOBUFS;
3550 goto exit;
3551 }
3552
Michal Kazior7aa7a722014-08-25 12:09:38 +02003553 ath10k_dbg(ar, ATH10K_DBG_MAC,
Bartosz Markowski0e759f32014-01-02 14:38:33 +01003554 "mac vdev %d peer create %pM (new sta) num_peers %d\n",
3555 arvif->vdev_id, sta->addr, ar->num_peers);
Kalle Valo60c3daa2013-09-08 17:56:07 +03003556
Kalle Valo5e3dd152013-06-12 20:52:10 +03003557 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
3558 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003559 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
Ben Greear479398b2013-11-04 09:19:34 -08003560 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003561 } else if ((old_state == IEEE80211_STA_NONE &&
3562 new_state == IEEE80211_STA_NOTEXIST)) {
3563 /*
3564 * Existing station deletion.
3565 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02003566 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03003567 "mac vdev %d peer delete %pM (sta gone)\n",
3568 arvif->vdev_id, sta->addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003569 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
3570 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003571 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02003572 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003573
3574 if (vif->type == NL80211_IFTYPE_STATION)
3575 ath10k_bss_disassoc(hw, vif);
3576 } else if (old_state == IEEE80211_STA_AUTH &&
3577 new_state == IEEE80211_STA_ASSOC &&
3578 (vif->type == NL80211_IFTYPE_AP ||
3579 vif->type == NL80211_IFTYPE_ADHOC)) {
3580 /*
3581 * New association.
3582 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02003583 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03003584 sta->addr);
3585
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02003586 ret = ath10k_station_assoc(ar, arvif, sta, false);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003587 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003588 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02003589 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003590 } else if (old_state == IEEE80211_STA_ASSOC &&
3591 new_state == IEEE80211_STA_AUTH &&
3592 (vif->type == NL80211_IFTYPE_AP ||
3593 vif->type == NL80211_IFTYPE_ADHOC)) {
3594 /*
3595 * Disassociation.
3596 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02003597 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03003598 sta->addr);
3599
Kalle Valo5e3dd152013-06-12 20:52:10 +03003600 ret = ath10k_station_disassoc(ar, arvif, sta);
3601 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003602 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02003603 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003604 }
Bartosz Markowski0e759f32014-01-02 14:38:33 +01003605exit:
Kalle Valo5e3dd152013-06-12 20:52:10 +03003606 mutex_unlock(&ar->conf_mutex);
3607 return ret;
3608}
3609
3610static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
Kalle Valo5b07e072014-09-14 12:50:06 +03003611 u16 ac, bool enable)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003612{
3613 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3614 u32 value = 0;
3615 int ret = 0;
3616
Michal Kazior548db542013-07-05 16:15:15 +03003617 lockdep_assert_held(&ar->conf_mutex);
3618
Kalle Valo5e3dd152013-06-12 20:52:10 +03003619 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
3620 return 0;
3621
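	/* Map the mac80211 access category onto the matching WMI UAPSD
	 * delivery/trigger enable bits (VO -> AC3, VI -> AC2, BE -> AC1,
	 * BK -> AC0 as encoded in the WMI constants). */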
3622 switch (ac) {
3623 case IEEE80211_AC_VO:
3624 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
3625 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
3626 break;
3627 case IEEE80211_AC_VI:
3628 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
3629 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
3630 break;
3631 case IEEE80211_AC_BE:
3632 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
3633 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
3634 break;
3635 case IEEE80211_AC_BK:
3636 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
3637 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
3638 break;
3639 }
3640
3641 if (enable)
3642 arvif->u.sta.uapsd |= value;
3643 else
3644 arvif->u.sta.uapsd &= ~value;
3645
3646 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
3647 WMI_STA_PS_PARAM_UAPSD,
3648 arvif->u.sta.uapsd);
3649 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003650 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003651 goto exit;
3652 }
3653
3654 if (arvif->u.sta.uapsd)
3655 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
3656 else
3657 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
3658
3659 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
3660 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
3661 value);
3662 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003663 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003664
3665exit:
3666 return ret;
3667}
3668
3669static int ath10k_conf_tx(struct ieee80211_hw *hw,
3670 struct ieee80211_vif *vif, u16 ac,
3671 const struct ieee80211_tx_queue_params *params)
3672{
3673 struct ath10k *ar = hw->priv;
3674 struct wmi_wmm_params_arg *p = NULL;
3675 int ret;
3676
3677 mutex_lock(&ar->conf_mutex);
3678
3679 switch (ac) {
3680 case IEEE80211_AC_VO:
3681 p = &ar->wmm_params.ac_vo;
3682 break;
3683 case IEEE80211_AC_VI:
3684 p = &ar->wmm_params.ac_vi;
3685 break;
3686 case IEEE80211_AC_BE:
3687 p = &ar->wmm_params.ac_be;
3688 break;
3689 case IEEE80211_AC_BK:
3690 p = &ar->wmm_params.ac_bk;
3691 break;
3692 }
3693
3694 if (WARN_ON(!p)) {
3695 ret = -EINVAL;
3696 goto exit;
3697 }
3698
3699 p->cwmin = params->cw_min;
3700 p->cwmax = params->cw_max;
3701 p->aifs = params->aifs;
3702
3703 /*
3704 * The channel time duration programmed in the HW is in absolute
3705 * microseconds, while mac80211 gives the txop in units of
3706 * 32 microseconds.
3707 */
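	/* e.g. a mac80211 txop of 47 becomes 47 * 32 = 1504 usec here */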
3708 p->txop = params->txop * 32;
3709
3710 /* FIXME: FW accepts wmm params per hw, not per vif */
3711 ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
3712 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003713 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003714 goto exit;
3715 }
3716
3717 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
3718 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003719 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003720
3721exit:
3722 mutex_unlock(&ar->conf_mutex);
3723 return ret;
3724}
3725
3726#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
3727
3728static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
3729 struct ieee80211_vif *vif,
3730 struct ieee80211_channel *chan,
3731 int duration,
3732 enum ieee80211_roc_type type)
3733{
3734 struct ath10k *ar = hw->priv;
3735 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3736 struct wmi_start_scan_arg arg;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003737 int ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003738
3739 mutex_lock(&ar->conf_mutex);
3740
3741 spin_lock_bh(&ar->data_lock);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003742 switch (ar->scan.state) {
3743 case ATH10K_SCAN_IDLE:
3744 reinit_completion(&ar->scan.started);
3745 reinit_completion(&ar->scan.completed);
3746 reinit_completion(&ar->scan.on_channel);
3747 ar->scan.state = ATH10K_SCAN_STARTING;
3748 ar->scan.is_roc = true;
3749 ar->scan.vdev_id = arvif->vdev_id;
3750 ar->scan.roc_freq = chan->center_freq;
3751 ret = 0;
3752 break;
3753 case ATH10K_SCAN_STARTING:
3754 case ATH10K_SCAN_RUNNING:
3755 case ATH10K_SCAN_ABORTING:
Kalle Valo5e3dd152013-06-12 20:52:10 +03003756 ret = -EBUSY;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003757 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003758 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03003759 spin_unlock_bh(&ar->data_lock);
3760
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003761 if (ret)
3762 goto exit;
3763
Kalle Valo5e3dd152013-06-12 20:52:10 +03003764 memset(&arg, 0, sizeof(arg));
3765 ath10k_wmi_start_scan_init(ar, &arg);
3766 arg.vdev_id = arvif->vdev_id;
3767 arg.scan_id = ATH10K_SCAN_ID;
3768 arg.n_channels = 1;
3769 arg.channels[0] = chan->center_freq;
3770 arg.dwell_time_active = duration;
3771 arg.dwell_time_passive = duration;
3772 arg.max_scan_time = 2 * duration;
3773 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
3774 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
3775
3776 ret = ath10k_start_scan(ar, &arg);
3777 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003778 ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003779 spin_lock_bh(&ar->data_lock);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003780 ar->scan.state = ATH10K_SCAN_IDLE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003781 spin_unlock_bh(&ar->data_lock);
3782 goto exit;
3783 }
3784
3785 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
3786 if (ret == 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003787 ath10k_warn(ar, "failed to switch to channel for roc scan\n");
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003788
3789 ret = ath10k_scan_stop(ar);
3790 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003791 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003792
Kalle Valo5e3dd152013-06-12 20:52:10 +03003793 ret = -ETIMEDOUT;
3794 goto exit;
3795 }
3796
3797 ret = 0;
3798exit:
3799 mutex_unlock(&ar->conf_mutex);
3800 return ret;
3801}
3802
3803static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
3804{
3805 struct ath10k *ar = hw->priv;
3806
3807 mutex_lock(&ar->conf_mutex);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003808 cancel_delayed_work_sync(&ar->scan.timeout);
3809 ath10k_scan_abort(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003810 mutex_unlock(&ar->conf_mutex);
3811
3812 return 0;
3813}
3814
3815/*
3816 * Both RTS and Fragmentation threshold are interface-specific
3817 * in ath10k, but device-specific in mac80211.
3818 */
Kalle Valo5e3dd152013-06-12 20:52:10 +03003819
3820static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3821{
Kalle Valo5e3dd152013-06-12 20:52:10 +03003822 struct ath10k *ar = hw->priv;
Michal Kaziorad088bf2013-10-16 15:44:46 +03003823 struct ath10k_vif *arvif;
3824 int ret = 0;
Michal Kazior548db542013-07-05 16:15:15 +03003825
Michal Kaziorad088bf2013-10-16 15:44:46 +03003826 mutex_lock(&ar->conf_mutex);
3827 list_for_each_entry(arvif, &ar->arvifs, list) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003828 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
Michal Kaziorad088bf2013-10-16 15:44:46 +03003829 arvif->vdev_id, value);
Kalle Valo60c3daa2013-09-08 17:56:07 +03003830
Michal Kaziorad088bf2013-10-16 15:44:46 +03003831 ret = ath10k_mac_set_rts(arvif, value);
3832 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003833 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
Michal Kaziorad088bf2013-10-16 15:44:46 +03003834 arvif->vdev_id, ret);
3835 break;
3836 }
3837 }
3838 mutex_unlock(&ar->conf_mutex);
3839
3840 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003841}
3842
3843static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3844{
Kalle Valo5e3dd152013-06-12 20:52:10 +03003845 struct ath10k *ar = hw->priv;
Michal Kaziorad088bf2013-10-16 15:44:46 +03003846 struct ath10k_vif *arvif;
3847 int ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003848
Kalle Valo5e3dd152013-06-12 20:52:10 +03003849 mutex_lock(&ar->conf_mutex);
Michal Kaziorad088bf2013-10-16 15:44:46 +03003850 list_for_each_entry(arvif, &ar->arvifs, list) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003851 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
Michal Kaziorad088bf2013-10-16 15:44:46 +03003852 arvif->vdev_id, value);
3853
 3854		ret = ath10k_mac_set_frag(arvif, value);
3855 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003856 ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n",
Michal Kaziorad088bf2013-10-16 15:44:46 +03003857 arvif->vdev_id, ret);
3858 break;
3859 }
3860 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03003861 mutex_unlock(&ar->conf_mutex);
3862
Michal Kaziorad088bf2013-10-16 15:44:46 +03003863 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003864}
3865
Emmanuel Grumbach77be2c52014-03-27 11:30:29 +02003866static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3867 u32 queues, bool drop)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003868{
3869 struct ath10k *ar = hw->priv;
Michal Kazioraffd3212013-07-16 09:54:35 +02003870 bool skip;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003871 int ret;
3872
 3873	/* mac80211 doesn't care if we really xmit queued frames or not;
 3874	 * we'll collect those frames either way if we stop/delete vdevs */
3875 if (drop)
3876 return;
3877
Michal Kazior548db542013-07-05 16:15:15 +03003878 mutex_lock(&ar->conf_mutex);
3879
Michal Kazioraffd3212013-07-16 09:54:35 +02003880 if (ar->state == ATH10K_STATE_WEDGED)
3881 goto skip;
3882
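	/* Wait (bounded by ATH10K_FLUSH_TIMEOUT_HZ) until either the HTT tx
	 * queue has drained or the device has been declared wedged; the
	 * ({ }) statement expression re-checks both on every wakeup. */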
Michal Kazioredb82362013-07-05 16:15:14 +03003883 ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
Kalle Valo5e3dd152013-06-12 20:52:10 +03003884 bool empty;
Michal Kazioraffd3212013-07-16 09:54:35 +02003885
Michal Kazioredb82362013-07-05 16:15:14 +03003886 spin_lock_bh(&ar->htt.tx_lock);
Michal Kazior0945baf2013-09-18 14:43:18 +02003887 empty = (ar->htt.num_pending_tx == 0);
Michal Kazioredb82362013-07-05 16:15:14 +03003888 spin_unlock_bh(&ar->htt.tx_lock);
Michal Kazioraffd3212013-07-16 09:54:35 +02003889
3890 skip = (ar->state == ATH10K_STATE_WEDGED);
3891
3892 (empty || skip);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003893 }), ATH10K_FLUSH_TIMEOUT_HZ);
Michal Kazioraffd3212013-07-16 09:54:35 +02003894
3895 if (ret <= 0 || skip)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003896 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
Ben Greear9ba4c782014-02-25 09:29:57 +02003897 skip, ar->state, ret);
Michal Kazior548db542013-07-05 16:15:15 +03003898
Michal Kazioraffd3212013-07-16 09:54:35 +02003899skip:
Michal Kazior548db542013-07-05 16:15:15 +03003900 mutex_unlock(&ar->conf_mutex);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003901}
3902
3903/* TODO: Implement this function properly
3904 * For now it is needed to reply to Probe Requests in IBSS mode.
 3905 * Probably we need this information from FW.
3906 */
3907static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
3908{
3909 return 1;
3910}
3911
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003912#ifdef CONFIG_PM
3913static int ath10k_suspend(struct ieee80211_hw *hw,
3914 struct cfg80211_wowlan *wowlan)
3915{
3916 struct ath10k *ar = hw->priv;
3917 int ret;
3918
Marek Puzyniak9042e172014-02-10 17:14:23 +01003919 mutex_lock(&ar->conf_mutex);
3920
Marek Puzyniak00f54822014-02-10 17:14:24 +01003921 ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003922 if (ret) {
Marek Puzyniak00f54822014-02-10 17:14:24 +01003923 if (ret == -ETIMEDOUT)
3924 goto resume;
Marek Puzyniak9042e172014-02-10 17:14:23 +01003925 ret = 1;
3926 goto exit;
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003927 }
3928
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003929 ret = ath10k_hif_suspend(ar);
3930 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003931 ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003932 goto resume;
3933 }
3934
Marek Puzyniak9042e172014-02-10 17:14:23 +01003935 ret = 0;
3936 goto exit;
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003937resume:
3938 ret = ath10k_wmi_pdev_resume_target(ar);
3939 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003940 ath10k_warn(ar, "failed to resume target: %d\n", ret);
Marek Puzyniak9042e172014-02-10 17:14:23 +01003941
3942 ret = 1;
3943exit:
3944 mutex_unlock(&ar->conf_mutex);
3945 return ret;
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003946}
3947
3948static int ath10k_resume(struct ieee80211_hw *hw)
3949{
3950 struct ath10k *ar = hw->priv;
3951 int ret;
3952
Marek Puzyniak9042e172014-02-10 17:14:23 +01003953 mutex_lock(&ar->conf_mutex);
3954
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003955 ret = ath10k_hif_resume(ar);
3956 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003957 ath10k_warn(ar, "failed to resume hif: %d\n", ret);
Marek Puzyniak9042e172014-02-10 17:14:23 +01003958 ret = 1;
3959 goto exit;
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003960 }
3961
3962 ret = ath10k_wmi_pdev_resume_target(ar);
3963 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003964 ath10k_warn(ar, "failed to resume target: %d\n", ret);
Marek Puzyniak9042e172014-02-10 17:14:23 +01003965 ret = 1;
3966 goto exit;
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003967 }
3968
Marek Puzyniak9042e172014-02-10 17:14:23 +01003969 ret = 0;
3970exit:
3971 mutex_unlock(&ar->conf_mutex);
3972 return ret;
Michal Kazior8cd13ca2013-07-16 09:38:54 +02003973}
3974#endif
3975
Michal Kazioraffd3212013-07-16 09:54:35 +02003976static void ath10k_restart_complete(struct ieee80211_hw *hw)
3977{
3978 struct ath10k *ar = hw->priv;
3979
3980 mutex_lock(&ar->conf_mutex);
3981
3982 /* If device failed to restart it will be in a different state, e.g.
3983 * ATH10K_STATE_WEDGED */
3984 if (ar->state == ATH10K_STATE_RESTARTED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003985 ath10k_info(ar, "device successfully recovered\n");
Michal Kazioraffd3212013-07-16 09:54:35 +02003986 ar->state = ATH10K_STATE_ON;
3987 }
3988
3989 mutex_unlock(&ar->conf_mutex);
3990}
3991
Michal Kazior2e1dea42013-07-31 10:32:40 +02003992static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
3993 struct survey_info *survey)
3994{
3995 struct ath10k *ar = hw->priv;
3996 struct ieee80211_supported_band *sband;
3997 struct survey_info *ar_survey = &ar->survey[idx];
3998 int ret = 0;
3999
4000 mutex_lock(&ar->conf_mutex);
4001
4002 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
4003 if (sband && idx >= sband->n_channels) {
4004 idx -= sband->n_channels;
4005 sband = NULL;
4006 }
4007
4008 if (!sband)
4009 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
4010
4011 if (!sband || idx >= sband->n_channels) {
4012 ret = -ENOENT;
4013 goto exit;
4014 }
4015
4016 spin_lock_bh(&ar->data_lock);
4017 memcpy(survey, ar_survey, sizeof(*survey));
4018 spin_unlock_bh(&ar->data_lock);
4019
4020 survey->channel = &sband->channels[idx];
4021
4022exit:
4023 mutex_unlock(&ar->conf_mutex);
4024 return ret;
4025}
4026
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004027/* Helper table for legacy fixed_rate/bitrate_mask */
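/* Entries are the hw rate-code indices the firmware expects, indexed by the
 * rate's bit position in the legacy bitrate mask (on 2 GHz: 4 CCK + 8 OFDM
 * rates; on 5 GHz the mask holds only OFDM rates, hence the +4 offset at
 * lookup time). */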
4028static const u8 cck_ofdm_rate[] = {
4029 /* CCK */
4030 3, /* 1Mbps */
4031 2, /* 2Mbps */
4032 1, /* 5.5Mbps */
4033 0, /* 11Mbps */
4034 /* OFDM */
4035 3, /* 6Mbps */
4036 7, /* 9Mbps */
4037 2, /* 12Mbps */
4038 6, /* 18Mbps */
4039 1, /* 24Mbps */
4040 5, /* 36Mbps */
4041 0, /* 48Mbps */
4042 4, /* 54Mbps */
4043};
4044
4045/* Check if only one bit set */
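/* (returns 0 for an empty mask, 1 if exactly one bit is set, 2 otherwise) */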
4046static int ath10k_check_single_mask(u32 mask)
4047{
4048 int bit;
4049
4050 bit = ffs(mask);
4051 if (!bit)
4052 return 0;
4053
4054 mask &= ~BIT(bit - 1);
4055 if (mask)
4056 return 2;
4057
4058 return 1;
4059}
4060
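/* Check whether the requested mask is the band's "allow all rates" default
 * (all legacy rates for the band, HT MCS 0xff per chain and, on 5 GHz, VHT
 * MCS map 0x3ff per chain), in which case no fixed rate needs to be
 * programmed at all. */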
4061static bool
4062ath10k_default_bitrate_mask(struct ath10k *ar,
4063 enum ieee80211_band band,
4064 const struct cfg80211_bitrate_mask *mask)
4065{
4066 u32 legacy = 0x00ff;
4067 u8 ht = 0xff, i;
4068 u16 vht = 0x3ff;
4069
4070 switch (band) {
4071 case IEEE80211_BAND_2GHZ:
4072 legacy = 0x00fff;
4073 vht = 0;
4074 break;
4075 case IEEE80211_BAND_5GHZ:
4076 break;
4077 default:
4078 return false;
4079 }
4080
4081 if (mask->control[band].legacy != legacy)
4082 return false;
4083
4084 for (i = 0; i < ar->num_rf_chains; i++)
4085 if (mask->control[band].ht_mcs[i] != ht)
4086 return false;
4087
4088 for (i = 0; i < ar->num_rf_chains; i++)
4089 if (mask->control[band].vht_mcs[i] != vht)
4090 return false;
4091
4092 return true;
4093}
4094
4095static bool
4096ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
4097 enum ieee80211_band band,
4098 u8 *fixed_nss)
4099{
4100 int ht_nss = 0, vht_nss = 0, i;
4101
4102 /* check legacy */
4103 if (ath10k_check_single_mask(mask->control[band].legacy))
4104 return false;
4105
4106 /* check HT */
4107 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
4108 if (mask->control[band].ht_mcs[i] == 0xff)
4109 continue;
4110 else if (mask->control[band].ht_mcs[i] == 0x00)
4111 break;
Kalle Valod8bb26b2014-09-14 12:50:33 +03004112
4113 return false;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004114 }
4115
4116 ht_nss = i;
4117
4118 /* check VHT */
4119 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
4120 if (mask->control[band].vht_mcs[i] == 0x03ff)
4121 continue;
4122 else if (mask->control[band].vht_mcs[i] == 0x0000)
4123 break;
Kalle Valod8bb26b2014-09-14 12:50:33 +03004124
4125 return false;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004126 }
4127
4128 vht_nss = i;
4129
4130 if (ht_nss > 0 && vht_nss > 0)
4131 return false;
4132
4133 if (ht_nss)
4134 *fixed_nss = ht_nss;
4135 else if (vht_nss)
4136 *fixed_nss = vht_nss;
4137 else
4138 return false;
4139
4140 return true;
4141}
4142
4143static bool
4144ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
4145 enum ieee80211_band band,
4146 enum wmi_rate_preamble *preamble)
4147{
4148 int legacy = 0, ht = 0, vht = 0, i;
4149
4150 *preamble = WMI_RATE_PREAMBLE_OFDM;
4151
4152 /* check legacy */
4153 legacy = ath10k_check_single_mask(mask->control[band].legacy);
4154 if (legacy > 1)
4155 return false;
4156
4157 /* check HT */
4158 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
4159 ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
4160 if (ht > 1)
4161 return false;
4162
4163 /* check VHT */
4164 for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
4165 vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
4166 if (vht > 1)
4167 return false;
4168
4169 /* Currently we support only one fixed_rate */
4170 if ((legacy + ht + vht) != 1)
4171 return false;
4172
4173 if (ht)
4174 *preamble = WMI_RATE_PREAMBLE_HT;
4175 else if (vht)
4176 *preamble = WMI_RATE_PREAMBLE_VHT;
4177
4178 return true;
4179}
4180
4181static bool
Michal Kazior7aa7a722014-08-25 12:09:38 +02004182ath10k_bitrate_mask_rate(struct ath10k *ar,
4183 const struct cfg80211_bitrate_mask *mask,
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004184 enum ieee80211_band band,
4185 u8 *fixed_rate,
4186 u8 *fixed_nss)
4187{
4188 u8 rate = 0, pream = 0, nss = 0, i;
4189 enum wmi_rate_preamble preamble;
4190
4191 /* Check if single rate correct */
4192 if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
4193 return false;
4194
4195 pream = preamble;
4196
4197 switch (preamble) {
4198 case WMI_RATE_PREAMBLE_CCK:
4199 case WMI_RATE_PREAMBLE_OFDM:
4200 i = ffs(mask->control[band].legacy) - 1;
4201
4202 if (band == IEEE80211_BAND_2GHZ && i < 4)
4203 pream = WMI_RATE_PREAMBLE_CCK;
4204
4205 if (band == IEEE80211_BAND_5GHZ)
4206 i += 4;
4207
4208 if (i >= ARRAY_SIZE(cck_ofdm_rate))
4209 return false;
4210
4211 rate = cck_ofdm_rate[i];
4212 break;
4213 case WMI_RATE_PREAMBLE_HT:
4214 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
4215 if (mask->control[band].ht_mcs[i])
4216 break;
4217
4218 if (i == IEEE80211_HT_MCS_MASK_LEN)
4219 return false;
4220
4221 rate = ffs(mask->control[band].ht_mcs[i]) - 1;
4222 nss = i;
4223 break;
4224 case WMI_RATE_PREAMBLE_VHT:
4225 for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
4226 if (mask->control[band].vht_mcs[i])
4227 break;
4228
4229 if (i == NL80211_VHT_NSS_MAX)
4230 return false;
4231
4232 rate = ffs(mask->control[band].vht_mcs[i]) - 1;
4233 nss = i;
4234 break;
4235 }
4236
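	/* Assemble the 8-bit WMI fixed rate code: preamble type in bits
	 * [7:6], (nss - 1) in bits [5:4] and the rate/MCS index in bits
	 * [3:0], matching the shifts below. */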
4237 *fixed_nss = nss + 1;
4238 nss <<= 4;
4239 pream <<= 6;
4240
Michal Kazior7aa7a722014-08-25 12:09:38 +02004241 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004242 pream, nss, rate);
4243
4244 *fixed_rate = pream | nss | rate;
4245
4246 return true;
4247}
4248
Michal Kazior7aa7a722014-08-25 12:09:38 +02004249static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
4250 const struct cfg80211_bitrate_mask *mask,
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004251 enum ieee80211_band band,
4252 u8 *fixed_rate,
4253 u8 *fixed_nss)
4254{
 4255	/* First check the full NSS mask to see if we can simply limit NSS */
4256 if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
4257 return true;
4258
4259 /* Next Check single rate is set */
Michal Kazior7aa7a722014-08-25 12:09:38 +02004260 return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004261}
4262
4263static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
4264 u8 fixed_rate,
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004265 u8 fixed_nss,
4266 u8 force_sgi)
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004267{
4268 struct ath10k *ar = arvif->ar;
4269 u32 vdev_param;
4270 int ret = 0;
4271
4272 mutex_lock(&ar->conf_mutex);
4273
4274 if (arvif->fixed_rate == fixed_rate &&
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004275 arvif->fixed_nss == fixed_nss &&
4276 arvif->force_sgi == force_sgi)
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004277 goto exit;
4278
4279 if (fixed_rate == WMI_FIXED_RATE_NONE)
Michal Kazior7aa7a722014-08-25 12:09:38 +02004280 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004281
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004282 if (force_sgi)
Michal Kazior7aa7a722014-08-25 12:09:38 +02004283 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004284
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004285 vdev_param = ar->wmi.vdev_param->fixed_rate;
4286 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4287 vdev_param, fixed_rate);
4288 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004289 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004290 fixed_rate, ret);
4291 ret = -EINVAL;
4292 goto exit;
4293 }
4294
4295 arvif->fixed_rate = fixed_rate;
4296
4297 vdev_param = ar->wmi.vdev_param->nss;
4298 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4299 vdev_param, fixed_nss);
4300
4301 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004302 ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004303 fixed_nss, ret);
4304 ret = -EINVAL;
4305 goto exit;
4306 }
4307
4308 arvif->fixed_nss = fixed_nss;
4309
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004310 vdev_param = ar->wmi.vdev_param->sgi;
4311 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4312 force_sgi);
4313
4314 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004315 ath10k_warn(ar, "failed to set sgi param %d: %d\n",
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004316 force_sgi, ret);
4317 ret = -EINVAL;
4318 goto exit;
4319 }
4320
4321 arvif->force_sgi = force_sgi;
4322
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004323exit:
4324 mutex_unlock(&ar->conf_mutex);
4325 return ret;
4326}
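/* The three vdev parameters above (fixed_rate, nss, sgi) are pushed to the
 * firmware as separate WMI commands under conf_mutex; the values cached in
 * arvif let the function return early and skip the WMI round trips when
 * nothing has actually changed.
 */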
4327
4328static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
4329 struct ieee80211_vif *vif,
4330 const struct cfg80211_bitrate_mask *mask)
4331{
4332 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4333 struct ath10k *ar = arvif->ar;
4334 enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
4335 u8 fixed_rate = WMI_FIXED_RATE_NONE;
4336 u8 fixed_nss = ar->num_rf_chains;
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004337 u8 force_sgi;
4338
4339 force_sgi = mask->control[band].gi;
4340 if (force_sgi == NL80211_TXRATE_FORCE_LGI)
4341 return -EINVAL;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004342
4343 if (!ath10k_default_bitrate_mask(ar, band, mask)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004344 if (!ath10k_get_fixed_rate_nss(ar, mask, band,
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004345 &fixed_rate,
4346 &fixed_nss))
4347 return -EINVAL;
4348 }
4349
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004350 if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004351 ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01004352 return -EINVAL;
4353 }
4354
4355 return ath10k_set_fixed_rate_param(arvif, fixed_rate,
4356 fixed_nss, force_sgi);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004357}
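/* This is wired up below as the mac80211 set_bitrate_mask op, so it is what
 * eventually runs when userspace fixes rates on the interface, e.g. roughly
 * "iw dev wlan0 set bitrates vht-mcs-5 2:7" travelling through cfg80211 and
 * mac80211 (command syntax given as an illustration only). Only the default
 * mask, a pure NSS limit or a single fixed rate is accepted; every other
 * mask, and forcing SGI together with default rates, returns -EINVAL.
 */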
4358
Michal Kazior9797feb2014-02-14 14:49:48 +01004359static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4360 struct ieee80211_vif *vif,
4361 struct ieee80211_sta *sta,
4362 u32 changed)
4363{
4364 struct ath10k *ar = hw->priv;
4365 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
4366 u32 bw, smps;
4367
4368 spin_lock_bh(&ar->data_lock);
4369
Michal Kazior7aa7a722014-08-25 12:09:38 +02004370 ath10k_dbg(ar, ATH10K_DBG_MAC,
Michal Kazior9797feb2014-02-14 14:49:48 +01004371 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
4372 sta->addr, changed, sta->bandwidth, sta->rx_nss,
4373 sta->smps_mode);
4374
4375 if (changed & IEEE80211_RC_BW_CHANGED) {
4376 bw = WMI_PEER_CHWIDTH_20MHZ;
4377
4378 switch (sta->bandwidth) {
4379 case IEEE80211_STA_RX_BW_20:
4380 bw = WMI_PEER_CHWIDTH_20MHZ;
4381 break;
4382 case IEEE80211_STA_RX_BW_40:
4383 bw = WMI_PEER_CHWIDTH_40MHZ;
4384 break;
4385 case IEEE80211_STA_RX_BW_80:
4386 bw = WMI_PEER_CHWIDTH_80MHZ;
4387 break;
4388 case IEEE80211_STA_RX_BW_160:
Michal Kazior7aa7a722014-08-25 12:09:38 +02004389			ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02004390 sta->bandwidth, sta->addr);
Michal Kazior9797feb2014-02-14 14:49:48 +01004391 bw = WMI_PEER_CHWIDTH_20MHZ;
4392 break;
4393 }
4394
4395 arsta->bw = bw;
4396 }
4397
4398 if (changed & IEEE80211_RC_NSS_CHANGED)
4399 arsta->nss = sta->rx_nss;
4400
4401 if (changed & IEEE80211_RC_SMPS_CHANGED) {
4402 smps = WMI_PEER_SMPS_PS_NONE;
4403
4404 switch (sta->smps_mode) {
4405 case IEEE80211_SMPS_AUTOMATIC:
4406 case IEEE80211_SMPS_OFF:
4407 smps = WMI_PEER_SMPS_PS_NONE;
4408 break;
4409 case IEEE80211_SMPS_STATIC:
4410 smps = WMI_PEER_SMPS_STATIC;
4411 break;
4412 case IEEE80211_SMPS_DYNAMIC:
4413 smps = WMI_PEER_SMPS_DYNAMIC;
4414 break;
4415 case IEEE80211_SMPS_NUM_MODES:
Michal Kazior7aa7a722014-08-25 12:09:38 +02004416 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02004417 sta->smps_mode, sta->addr);
Michal Kazior9797feb2014-02-14 14:49:48 +01004418 smps = WMI_PEER_SMPS_PS_NONE;
4419 break;
4420 }
4421
4422 arsta->smps = smps;
4423 }
4424
Michal Kazior9797feb2014-02-14 14:49:48 +01004425 arsta->changed |= changed;
4426
4427 spin_unlock_bh(&ar->data_lock);
4428
4429 ieee80211_queue_work(hw, &arsta->update_wk);
4430}
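/* sta_rc_update must not sleep while WMI commands do, so only the new
 * bandwidth/NSS/SMPS values are recorded here under data_lock; the actual
 * peer parameter updates are issued later from the arsta->update_wk worker.
 */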
4431
Chun-Yeow Yeoh26ebbcc2014-02-25 09:29:54 +02004432static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4433{
4434 /*
4435	 * FIXME: Return 0 for the time being. Need to figure out whether the
4436	 * FW has an API to fetch the 64-bit local TSF.
4437 */
4438
4439 return 0;
4440}
4441
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02004442static int ath10k_ampdu_action(struct ieee80211_hw *hw,
4443 struct ieee80211_vif *vif,
4444 enum ieee80211_ampdu_mlme_action action,
4445 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4446 u8 buf_size)
4447{
Michal Kazior7aa7a722014-08-25 12:09:38 +02004448 struct ath10k *ar = hw->priv;
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02004449 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4450
Michal Kazior7aa7a722014-08-25 12:09:38 +02004451 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02004452 arvif->vdev_id, sta->addr, tid, action);
4453
4454 switch (action) {
4455 case IEEE80211_AMPDU_RX_START:
4456 case IEEE80211_AMPDU_RX_STOP:
4457 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
4458 * creation/removal. Do we need to verify this?
4459 */
4460 return 0;
4461 case IEEE80211_AMPDU_TX_START:
4462 case IEEE80211_AMPDU_TX_STOP_CONT:
4463 case IEEE80211_AMPDU_TX_STOP_FLUSH:
4464 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
4465 case IEEE80211_AMPDU_TX_OPERATIONAL:
4466 /* Firmware offloads Tx aggregation entirely so deny mac80211
4467 * Tx aggregation requests.
4468 */
4469 return -EOPNOTSUPP;
4470 }
4471
4472 return -EINVAL;
4473}
4474
Kalle Valo5e3dd152013-06-12 20:52:10 +03004475static const struct ieee80211_ops ath10k_ops = {
4476 .tx = ath10k_tx,
4477 .start = ath10k_start,
4478 .stop = ath10k_stop,
4479 .config = ath10k_config,
4480 .add_interface = ath10k_add_interface,
4481 .remove_interface = ath10k_remove_interface,
4482 .configure_filter = ath10k_configure_filter,
4483 .bss_info_changed = ath10k_bss_info_changed,
4484 .hw_scan = ath10k_hw_scan,
4485 .cancel_hw_scan = ath10k_cancel_hw_scan,
4486 .set_key = ath10k_set_key,
4487 .sta_state = ath10k_sta_state,
4488 .conf_tx = ath10k_conf_tx,
4489 .remain_on_channel = ath10k_remain_on_channel,
4490 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
4491 .set_rts_threshold = ath10k_set_rts_threshold,
4492 .set_frag_threshold = ath10k_set_frag_threshold,
4493 .flush = ath10k_flush,
4494 .tx_last_beacon = ath10k_tx_last_beacon,
Ben Greear46acf7b2014-05-16 17:15:38 +03004495 .set_antenna = ath10k_set_antenna,
4496 .get_antenna = ath10k_get_antenna,
Michal Kazioraffd3212013-07-16 09:54:35 +02004497 .restart_complete = ath10k_restart_complete,
Michal Kazior2e1dea42013-07-31 10:32:40 +02004498 .get_survey = ath10k_get_survey,
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01004499 .set_bitrate_mask = ath10k_set_bitrate_mask,
Michal Kazior9797feb2014-02-14 14:49:48 +01004500 .sta_rc_update = ath10k_sta_rc_update,
Chun-Yeow Yeoh26ebbcc2014-02-25 09:29:54 +02004501 .get_tsf = ath10k_get_tsf,
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02004502 .ampdu_action = ath10k_ampdu_action,
Ben Greear6cddcc72014-09-29 14:41:46 +03004503 .get_et_sset_count = ath10k_debug_get_et_sset_count,
4504 .get_et_stats = ath10k_debug_get_et_stats,
4505 .get_et_strings = ath10k_debug_get_et_strings,
Kalle Valo43d2a302014-09-10 18:23:30 +03004506
4507 CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
4508
Michal Kazior8cd13ca2013-07-16 09:38:54 +02004509#ifdef CONFIG_PM
4510 .suspend = ath10k_suspend,
4511 .resume = ath10k_resume,
4512#endif
Kalle Valo5e3dd152013-06-12 20:52:10 +03004513};
4514
4515#define RATETAB_ENT(_rate, _rateid, _flags) { \
4516 .bitrate = (_rate), \
4517 .flags = (_flags), \
4518 .hw_value = (_rateid), \
4519}
4520
4521#define CHAN2G(_channel, _freq, _flags) { \
4522 .band = IEEE80211_BAND_2GHZ, \
4523 .hw_value = (_channel), \
4524 .center_freq = (_freq), \
4525 .flags = (_flags), \
4526 .max_antenna_gain = 0, \
4527 .max_power = 30, \
4528}
4529
4530#define CHAN5G(_channel, _freq, _flags) { \
4531 .band = IEEE80211_BAND_5GHZ, \
4532 .hw_value = (_channel), \
4533 .center_freq = (_freq), \
4534 .flags = (_flags), \
4535 .max_antenna_gain = 0, \
4536 .max_power = 30, \
4537}
4538
4539static const struct ieee80211_channel ath10k_2ghz_channels[] = {
4540 CHAN2G(1, 2412, 0),
4541 CHAN2G(2, 2417, 0),
4542 CHAN2G(3, 2422, 0),
4543 CHAN2G(4, 2427, 0),
4544 CHAN2G(5, 2432, 0),
4545 CHAN2G(6, 2437, 0),
4546 CHAN2G(7, 2442, 0),
4547 CHAN2G(8, 2447, 0),
4548 CHAN2G(9, 2452, 0),
4549 CHAN2G(10, 2457, 0),
4550 CHAN2G(11, 2462, 0),
4551 CHAN2G(12, 2467, 0),
4552 CHAN2G(13, 2472, 0),
4553 CHAN2G(14, 2484, 0),
4554};
4555
4556static const struct ieee80211_channel ath10k_5ghz_channels[] = {
Michal Kazior429ff562013-06-26 08:54:54 +02004557 CHAN5G(36, 5180, 0),
4558 CHAN5G(40, 5200, 0),
4559 CHAN5G(44, 5220, 0),
4560 CHAN5G(48, 5240, 0),
4561 CHAN5G(52, 5260, 0),
4562 CHAN5G(56, 5280, 0),
4563 CHAN5G(60, 5300, 0),
4564 CHAN5G(64, 5320, 0),
4565 CHAN5G(100, 5500, 0),
4566 CHAN5G(104, 5520, 0),
4567 CHAN5G(108, 5540, 0),
4568 CHAN5G(112, 5560, 0),
4569 CHAN5G(116, 5580, 0),
4570 CHAN5G(120, 5600, 0),
4571 CHAN5G(124, 5620, 0),
4572 CHAN5G(128, 5640, 0),
4573 CHAN5G(132, 5660, 0),
4574 CHAN5G(136, 5680, 0),
4575 CHAN5G(140, 5700, 0),
4576 CHAN5G(149, 5745, 0),
4577 CHAN5G(153, 5765, 0),
4578 CHAN5G(157, 5785, 0),
4579 CHAN5G(161, 5805, 0),
4580 CHAN5G(165, 5825, 0),
Kalle Valo5e3dd152013-06-12 20:52:10 +03004581};
4582
4583static struct ieee80211_rate ath10k_rates[] = {
4584 /* CCK */
4585 RATETAB_ENT(10, 0x82, 0),
4586 RATETAB_ENT(20, 0x84, 0),
4587 RATETAB_ENT(55, 0x8b, 0),
4588 RATETAB_ENT(110, 0x96, 0),
4589 /* OFDM */
4590 RATETAB_ENT(60, 0x0c, 0),
4591 RATETAB_ENT(90, 0x12, 0),
4592 RATETAB_ENT(120, 0x18, 0),
4593 RATETAB_ENT(180, 0x24, 0),
4594 RATETAB_ENT(240, 0x30, 0),
4595 RATETAB_ENT(360, 0x48, 0),
4596 RATETAB_ENT(480, 0x60, 0),
4597 RATETAB_ENT(540, 0x6c, 0),
4598};
4599
4600#define ath10k_a_rates (ath10k_rates + 4)
4601#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
4602#define ath10k_g_rates (ath10k_rates + 0)
4603#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
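/* The bitrate values above are in 100 kbps units (10 = 1 Mbps, 540 = 54
 * Mbps) and the hw_value entries follow the 802.11 supported-rates encoding
 * (units of 500 kbps, with the high bit set on the CCK rates).
 * ath10k_a_rates skips the first four entries so that 5 GHz advertises only
 * the OFDM rates, while 2.4 GHz keeps CCK + OFDM.
 */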
4604
Michal Kaziore7b54192014-08-07 11:03:27 +02004605struct ath10k *ath10k_mac_create(size_t priv_size)
Kalle Valo5e3dd152013-06-12 20:52:10 +03004606{
4607 struct ieee80211_hw *hw;
4608 struct ath10k *ar;
4609
Michal Kaziore7b54192014-08-07 11:03:27 +02004610 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004611 if (!hw)
4612 return NULL;
4613
4614 ar = hw->priv;
4615 ar->hw = hw;
4616
4617 return ar;
4618}
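/* priv_size is the amount of bus-specific private data appended after
 * struct ath10k inside the ieee80211_hw allocation; the bus backend reaches
 * it through ar->drv_priv (callers normally get here via the core layer).
 * A rough sketch of how the extra area gets used, assuming the PCI backend:
 *
 *	ar = ath10k_mac_create(sizeof(struct ath10k_pci));
 *	if (!ar)
 *		return NULL;
 *	ar_pci = (struct ath10k_pci *)ar->drv_priv;
 */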
4619
4620void ath10k_mac_destroy(struct ath10k *ar)
4621{
4622 ieee80211_free_hw(ar->hw);
4623}
4624
4625static const struct ieee80211_iface_limit ath10k_if_limits[] = {
4626 {
4627 .max = 8,
4628 .types = BIT(NL80211_IFTYPE_STATION)
4629 | BIT(NL80211_IFTYPE_P2P_CLIENT)
Michal Kaziord531cb82013-07-31 10:55:13 +02004630 },
4631 {
4632 .max = 3,
4633 .types = BIT(NL80211_IFTYPE_P2P_GO)
4634 },
4635 {
4636 .max = 7,
4637 .types = BIT(NL80211_IFTYPE_AP)
4638 },
Kalle Valo5e3dd152013-06-12 20:52:10 +03004639};
4640
Bartosz Markowskif2595092013-12-10 16:20:39 +01004641static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
Marek Puzyniake8a50f82013-11-20 09:59:47 +02004642 {
4643 .max = 8,
4644 .types = BIT(NL80211_IFTYPE_AP)
4645 },
4646};
Marek Puzyniake8a50f82013-11-20 09:59:47 +02004647
4648static const struct ieee80211_iface_combination ath10k_if_comb[] = {
4649 {
4650 .limits = ath10k_if_limits,
4651 .n_limits = ARRAY_SIZE(ath10k_if_limits),
4652 .max_interfaces = 8,
4653 .num_different_channels = 1,
4654 .beacon_int_infra_match = true,
4655 },
Bartosz Markowskif2595092013-12-10 16:20:39 +01004656};
4657
4658static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
Marek Puzyniake8a50f82013-11-20 09:59:47 +02004659 {
Bartosz Markowskif2595092013-12-10 16:20:39 +01004660 .limits = ath10k_10x_if_limits,
4661 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
Marek Puzyniake8a50f82013-11-20 09:59:47 +02004662 .max_interfaces = 8,
4663 .num_different_channels = 1,
4664 .beacon_int_infra_match = true,
Bartosz Markowskif2595092013-12-10 16:20:39 +01004665#ifdef CONFIG_ATH10K_DFS_CERTIFIED
Marek Puzyniake8a50f82013-11-20 09:59:47 +02004666 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
4667 BIT(NL80211_CHAN_WIDTH_20) |
4668 BIT(NL80211_CHAN_WIDTH_40) |
4669 BIT(NL80211_CHAN_WIDTH_80),
Marek Puzyniake8a50f82013-11-20 09:59:47 +02004670#endif
Bartosz Markowskif2595092013-12-10 16:20:39 +01004671 },
Kalle Valo5e3dd152013-06-12 20:52:10 +03004672};
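/* Summary of the combinations above: the default firmware set allows up to
 * 8 interfaces on one channel, mixing stations/P2P clients (8), P2P GOs (3)
 * and APs (7), while the 10.x firmware set is AP-only with up to 8 AP
 * vdevs. Radar detection widths are advertised only for the 10.x
 * combination and only when CONFIG_ATH10K_DFS_CERTIFIED is enabled.
 */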
4673
4674static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4675{
4676 struct ieee80211_sta_vht_cap vht_cap = {0};
4677 u16 mcs_map;
Michal Kazior8865bee42013-07-24 12:36:46 +02004678 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004679
4680 vht_cap.vht_supported = 1;
4681 vht_cap.cap = ar->vht_cap_info;
4682
Michal Kazior8865bee42013-07-24 12:36:46 +02004683 mcs_map = 0;
4684 for (i = 0; i < 8; i++) {
4685 if (i < ar->num_rf_chains)
4686 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
4687 else
4688 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
4689 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004690
4691 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4692 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4693
4694 return vht_cap;
4695}
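/* The VHT MCS map above uses the spec's two-bit-per-stream encoding over
 * eight spatial streams: streams backed by an RF chain advertise MCS 0-9,
 * the rest are marked not supported. With num_rf_chains == 2 this yields
 * 0xfffa for both the rx and tx maps.
 */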
4696
4697static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4698{
4699 int i;
4700 struct ieee80211_sta_ht_cap ht_cap = {0};
4701
4702 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4703 return ht_cap;
4704
4705 ht_cap.ht_supported = 1;
4706 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4707 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4708 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4709 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4710 ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
4711
4712 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4713 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4714
4715 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4716 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4717
4718 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4719 u32 smps;
4720
4721 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4722 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4723
4724 ht_cap.cap |= smps;
4725 }
4726
4727 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
4728 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4729
4730 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4731 u32 stbc;
4732
4733 stbc = ar->ht_cap_info;
4734 stbc &= WMI_HT_CAP_RX_STBC;
4735 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4736 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4737 stbc &= IEEE80211_HT_CAP_RX_STBC;
4738
4739 ht_cap.cap |= stbc;
4740 }
4741
4742 if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4743 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4744
4745 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4746 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4747
4748 /* max AMSDU is implicitly taken from vht_cap_info */
4749 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4750 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4751
Michal Kazior8865bee42013-07-24 12:36:46 +02004752 for (i = 0; i < ar->num_rf_chains; i++)
Kalle Valo5e3dd152013-06-12 20:52:10 +03004753 ht_cap.mcs.rx_mask[i] = 0xFF;
4754
4755 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4756
4757 return ht_cap;
4758}
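/* Of note above: the RX STBC capability is re-encoded rather than copied,
 * i.e. the stream count is extracted from the WMI_HT_CAP_RX_STBC field and
 * shifted into the IEEE80211_HT_CAP_RX_STBC position, and SM power save
 * defaults to static, relaxed to dynamic only when the firmware reports
 * WMI_HT_CAP_DYNAMIC_SMPS.
 */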
4759
Kalle Valo5e3dd152013-06-12 20:52:10 +03004760static void ath10k_get_arvif_iter(void *data, u8 *mac,
4761 struct ieee80211_vif *vif)
4762{
4763 struct ath10k_vif_iter *arvif_iter = data;
4764 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4765
4766 if (arvif->vdev_id == arvif_iter->vdev_id)
4767 arvif_iter->arvif = arvif;
4768}
4769
4770struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
4771{
4772 struct ath10k_vif_iter arvif_iter;
4773 u32 flags;
4774
4775 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
4776 arvif_iter.vdev_id = vdev_id;
4777
4778 flags = IEEE80211_IFACE_ITER_RESUME_ALL;
4779 ieee80211_iterate_active_interfaces_atomic(ar->hw,
4780 flags,
4781 ath10k_get_arvif_iter,
4782 &arvif_iter);
4783 if (!arvif_iter.arvif) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004784 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004785 return NULL;
4786 }
4787
4788 return arvif_iter.arvif;
4789}
4790
4791int ath10k_mac_register(struct ath10k *ar)
4792{
4793 struct ieee80211_supported_band *band;
4794 struct ieee80211_sta_vht_cap vht_cap;
4795 struct ieee80211_sta_ht_cap ht_cap;
4796 void *channels;
4797 int ret;
4798
4799 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
4800
4801 SET_IEEE80211_DEV(ar->hw, ar->dev);
4802
4803 ht_cap = ath10k_get_ht_cap(ar);
4804 vht_cap = ath10k_create_vht_cap(ar);
4805
4806 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4807 channels = kmemdup(ath10k_2ghz_channels,
4808 sizeof(ath10k_2ghz_channels),
4809 GFP_KERNEL);
Michal Kaziord6015b22013-07-22 14:13:30 +02004810 if (!channels) {
4811 ret = -ENOMEM;
4812 goto err_free;
4813 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004814
4815 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
4816 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
4817 band->channels = channels;
4818 band->n_bitrates = ath10k_g_rates_size;
4819 band->bitrates = ath10k_g_rates;
4820 band->ht_cap = ht_cap;
4821
4822 /* vht is not supported in 2.4 GHz */
4823
4824 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
4825 }
4826
4827 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4828 channels = kmemdup(ath10k_5ghz_channels,
4829 sizeof(ath10k_5ghz_channels),
4830 GFP_KERNEL);
4831 if (!channels) {
Michal Kaziord6015b22013-07-22 14:13:30 +02004832 ret = -ENOMEM;
4833 goto err_free;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004834 }
4835
4836 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
4837 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
4838 band->channels = channels;
4839 band->n_bitrates = ath10k_a_rates_size;
4840 band->bitrates = ath10k_a_rates;
4841 band->ht_cap = ht_cap;
4842 band->vht_cap = vht_cap;
4843 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
4844 }
4845
4846 ar->hw->wiphy->interface_modes =
4847 BIT(NL80211_IFTYPE_STATION) |
Bartosz Markowskid3541812013-12-10 16:20:40 +01004848 BIT(NL80211_IFTYPE_AP);
4849
Ben Greear46acf7b2014-05-16 17:15:38 +03004850 ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
4851 ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
4852
Bartosz Markowskid3541812013-12-10 16:20:40 +01004853 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
4854 ar->hw->wiphy->interface_modes |=
4855 BIT(NL80211_IFTYPE_P2P_CLIENT) |
4856 BIT(NL80211_IFTYPE_P2P_GO);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004857
4858 ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
4859 IEEE80211_HW_SUPPORTS_PS |
4860 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
4861 IEEE80211_HW_SUPPORTS_UAPSD |
4862 IEEE80211_HW_MFP_CAPABLE |
4863 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
4864 IEEE80211_HW_HAS_RATE_CONTROL |
4865 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
Janusz Dziedzic2f0f1122014-02-26 18:42:09 +02004866 IEEE80211_HW_AP_LINK_PS |
4867 IEEE80211_HW_SPECTRUM_MGMT;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004868
Michal Kazior1f8bb152013-09-18 14:43:22 +02004869 /* MSDU can have HTT TX fragment pushed in front. The additional 4
4870	 * bytes are used for padding/alignment if necessary. */
4871 ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
4872
Kalle Valo5e3dd152013-06-12 20:52:10 +03004873 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
4874 ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
4875
4876 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
4877 ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
4878 ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
4879 }
4880
4881 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
4882 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
4883
4884 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
Michal Kazior9797feb2014-02-14 14:49:48 +01004885 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004886
Kalle Valo5e3dd152013-06-12 20:52:10 +03004887 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
4888
4889 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
Michal Kaziorc2df44b2014-01-23 11:38:26 +01004890 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004891 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
4892
4893 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
4894 /*
4895	 * On LL hardware the queues are managed entirely by the FW, so we
4896	 * only advertise to mac80211 that we can handle the queues.
4897 */
4898 ar->hw->queues = 4;
4899
Bartosz Markowskif2595092013-12-10 16:20:39 +01004900 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
4901 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
4902 ar->hw->wiphy->n_iface_combinations =
4903 ARRAY_SIZE(ath10k_10x_if_comb);
4904 } else {
4905 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
4906 ar->hw->wiphy->n_iface_combinations =
4907 ARRAY_SIZE(ath10k_if_comb);
Michal Kaziorcf850d12014-07-24 20:07:00 +03004908
4909 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
Bartosz Markowskif2595092013-12-10 16:20:39 +01004910 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004911
Michal Kazior7c199992013-07-31 10:47:57 +02004912 ar->hw->netdev_features = NETIF_F_HW_CSUM;
4913
Janusz Dziedzic9702c682013-11-20 09:59:41 +02004914 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
4915 /* Init ath dfs pattern detector */
4916 ar->ath_common.debug_mask = ATH_DBG_DFS;
4917 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
4918 NL80211_DFS_UNSET);
4919
4920 if (!ar->dfs_detector)
Michal Kazior7aa7a722014-08-25 12:09:38 +02004921 ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
Janusz Dziedzic9702c682013-11-20 09:59:41 +02004922 }
4923
Kalle Valo5e3dd152013-06-12 20:52:10 +03004924 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
4925 ath10k_reg_notifier);
4926 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004927 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
Michal Kaziord6015b22013-07-22 14:13:30 +02004928 goto err_free;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004929 }
4930
4931 ret = ieee80211_register_hw(ar->hw);
4932 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004933 ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
Michal Kaziord6015b22013-07-22 14:13:30 +02004934 goto err_free;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004935 }
4936
4937 if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
4938 ret = regulatory_hint(ar->hw->wiphy,
4939 ar->ath_common.regulatory.alpha2);
4940 if (ret)
Michal Kaziord6015b22013-07-22 14:13:30 +02004941 goto err_unregister;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004942 }
4943
4944 return 0;
Michal Kaziord6015b22013-07-22 14:13:30 +02004945
4946err_unregister:
Kalle Valo5e3dd152013-06-12 20:52:10 +03004947 ieee80211_unregister_hw(ar->hw);
Michal Kaziord6015b22013-07-22 14:13:30 +02004948err_free:
4949 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
4950 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
4951
Kalle Valo5e3dd152013-06-12 20:52:10 +03004952 return ret;
4953}
4954
4955void ath10k_mac_unregister(struct ath10k *ar)
4956{
4957 ieee80211_unregister_hw(ar->hw);
4958
Janusz Dziedzic9702c682013-11-20 09:59:41 +02004959 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
4960 ar->dfs_detector->exit(ar->dfs_detector);
4961
Kalle Valo5e3dd152013-06-12 20:52:10 +03004962 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
4963 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
4964
4965 SET_IEEE80211_DEV(ar->hw, NULL);
4966}