/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mac.h"

#include <net/mac80211.h>
#include <linux/etherdevice.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"

/*********/
/* Rates */
/*********/

static struct ieee80211_rate ath10k_rates[] = {
        { .bitrate = 10,
          .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
        { .bitrate = 20,
          .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
          .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
          .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 55,
          .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
          .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
          .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 110,
          .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
          .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
          .flags = IEEE80211_RATE_SHORT_PREAMBLE },

        { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
        { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
        { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
        { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
        { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
        { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
        { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
        { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

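/* The four CCK rates are kept at the head of the table so that the 5 GHz
 * (11a) rate set can simply skip over them via the OFDM index below, while
 * the 2 GHz (11g) rate set uses the whole table.
 */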
#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4

#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
                             ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))

static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
        switch (bitrate) {
        case 10:
        case 20:
        case 55:
        case 110:
                return true;
        }

        return false;
}

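/* Convert a bitrate (in 100 kbps units) into a rate code: units of 500 kbps
 * with bit 7 set to mark CCK rates.
 */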
static u8 ath10k_mac_bitrate_to_rate(int bitrate)
{
        return DIV_ROUND_UP(bitrate, 5) |
               (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
}

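/* Map a hardware rate code back to an index into the band's bitrate table,
 * matching either the long or the short preamble code for CCK rates.
 */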
u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
                             u8 hw_rate, bool cck)
{
        const struct ieee80211_rate *rate;
        int i;

        for (i = 0; i < sband->n_bitrates; i++) {
                rate = &sband->bitrates[i];

                if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
                        continue;

                if (rate->hw_value == hw_rate)
                        return i;
                else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
                         rate->hw_value_short == hw_rate)
                        return i;
        }

        return 0;
}

u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
                             u32 bitrate)
{
        int i;

        for (i = 0; i < sband->n_bitrates; i++)
                if (sband->bitrates[i].bitrate == bitrate)
                        return i;

        return 0;
}

static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
        switch ((mcs_map >> (2 * nss)) & 0x3) {
        case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
        case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
        case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
        }
        return 0;
}

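/* Return the highest spatial stream count that still has a non-empty MCS
 * mask, falling back to a single stream when every mask is empty.
 */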
static u32
ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
{
        int nss;

        for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
                if (ht_mcs_mask[nss])
                        return nss + 1;

        return 1;
}

static u32
ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
{
        int nss;

        for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
                if (vht_mcs_mask[nss])
                        return nss + 1;

        return 1;
}

/**********/
/* Crypto */
/**********/

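/* Translate a mac80211 key into a WMI vdev install key command and hand it
 * to the firmware.
 */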
static int ath10k_send_key(struct ath10k_vif *arvif,
                           struct ieee80211_key_conf *key,
                           enum set_key_cmd cmd,
                           const u8 *macaddr, u32 flags)
{
        struct ath10k *ar = arvif->ar;
        struct wmi_vdev_install_key_arg arg = {
                .vdev_id = arvif->vdev_id,
                .key_idx = key->keyidx,
                .key_len = key->keylen,
                .key_data = key->key,
                .key_flags = flags,
                .macaddr = macaddr,
        };

        lockdep_assert_held(&arvif->ar->conf_mutex);

        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
                arg.key_cipher = WMI_CIPHER_AES_CCM;
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
                break;
        case WLAN_CIPHER_SUITE_TKIP:
                arg.key_cipher = WMI_CIPHER_TKIP;
                arg.key_txmic_len = 8;
                arg.key_rxmic_len = 8;
                break;
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
                arg.key_cipher = WMI_CIPHER_WEP;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                WARN_ON(1);
                return -EINVAL;
        default:
                ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
                return -EOPNOTSUPP;
        }

        if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

        if (cmd == DISABLE_KEY) {
                arg.key_cipher = WMI_CIPHER_NONE;
                arg.key_data = NULL;
        }

        return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
}

static int ath10k_install_key(struct ath10k_vif *arvif,
                              struct ieee80211_key_conf *key,
                              enum set_key_cmd cmd,
                              const u8 *macaddr, u32 flags)
{
        struct ath10k *ar = arvif->ar;
        int ret;
        unsigned long time_left;

        lockdep_assert_held(&ar->conf_mutex);

        reinit_completion(&ar->install_key_done);

        if (arvif->nohwcrypt)
                return 1;

        ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
        if (ret)
                return ret;

        time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
        if (time_left == 0)
                return -ETIMEDOUT;

        return 0;
}

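/* Push all WEP keys configured on this vif to the given peer. AP interfaces
 * install them as pairwise keys and mark the default TX key, while IBSS
 * installs each key as both a pairwise and a group key.
 */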
static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
                                        const u8 *addr)
{
        struct ath10k *ar = arvif->ar;
        struct ath10k_peer *peer;
        int ret;
        int i;
        u32 flags;

        lockdep_assert_held(&ar->conf_mutex);

        if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
                    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
                    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
                return -EINVAL;

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
        spin_unlock_bh(&ar->data_lock);

        if (!peer)
                return -ENOENT;

        for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
                if (arvif->wep_keys[i] == NULL)
                        continue;

                switch (arvif->vif->type) {
                case NL80211_IFTYPE_AP:
                        flags = WMI_KEY_PAIRWISE;

                        if (arvif->def_wep_key_idx == i)
                                flags |= WMI_KEY_TX_USAGE;

                        ret = ath10k_install_key(arvif, arvif->wep_keys[i],
                                                 SET_KEY, addr, flags);
                        if (ret < 0)
                                return ret;
                        break;
                case NL80211_IFTYPE_ADHOC:
                        ret = ath10k_install_key(arvif, arvif->wep_keys[i],
                                                 SET_KEY, addr,
                                                 WMI_KEY_PAIRWISE);
                        if (ret < 0)
                                return ret;

                        ret = ath10k_install_key(arvif, arvif->wep_keys[i],
                                                 SET_KEY, addr, WMI_KEY_GROUP);
                        if (ret < 0)
                                return ret;
                        break;
                default:
                        WARN_ON(1);
                        return -EINVAL;
                }

                spin_lock_bh(&ar->data_lock);
                peer->keys[i] = arvif->wep_keys[i];
                spin_unlock_bh(&ar->data_lock);
        }

        /* In some cases (notably with static WEP IBSS with multiple keys)
         * multicast Tx becomes broken. Both pairwise and groupwise keys are
         * installed already. Using WMI_KEY_TX_USAGE in different combinations
         * didn't seem to help. Using the def_keyid vdev parameter seems to be
         * effective, so use that.
         *
         * FIXME: Revisit. Perhaps this can be done in a less hacky way.
         */
        if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
                return 0;

        if (arvif->def_wep_key_idx == -1)
                return 0;

        ret = ath10k_wmi_vdev_set_param(arvif->ar,
                                        arvif->vdev_id,
                                        arvif->ar->wmi.vdev_param->def_keyid,
                                        arvif->def_wep_key_idx);
        if (ret) {
                ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        return 0;
}

static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
                                  const u8 *addr)
{
        struct ath10k *ar = arvif->ar;
        struct ath10k_peer *peer;
        int first_errno = 0;
        int ret;
        int i;
        u32 flags = 0;

        lockdep_assert_held(&ar->conf_mutex);

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
        spin_unlock_bh(&ar->data_lock);

        if (!peer)
                return -ENOENT;

        for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
                if (peer->keys[i] == NULL)
                        continue;

                /* key flags are not required to delete the key */
                ret = ath10k_install_key(arvif, peer->keys[i],
                                         DISABLE_KEY, addr, flags);
                if (ret < 0 && first_errno == 0)
                        first_errno = ret;

                if (ret < 0)
                        ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
                                    i, ret);

                spin_lock_bh(&ar->data_lock);
                peer->keys[i] = NULL;
                spin_unlock_bh(&ar->data_lock);
        }

        return first_errno;
}

bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
                                    u8 keyidx)
{
        struct ath10k_peer *peer;
        int i;

        lockdep_assert_held(&ar->data_lock);

        /* We don't know which vdev this peer belongs to,
         * since WMI doesn't give us that information.
         *
         * FIXME: multi-bss needs to be handled.
         */
        peer = ath10k_peer_find(ar, 0, addr);
        if (!peer)
                return false;

        for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
                if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
                        return true;
        }

        return false;
}

static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
                                 struct ieee80211_key_conf *key)
{
        struct ath10k *ar = arvif->ar;
        struct ath10k_peer *peer;
        u8 addr[ETH_ALEN];
        int first_errno = 0;
        int ret;
        int i;
        u32 flags = 0;

        lockdep_assert_held(&ar->conf_mutex);

        for (;;) {
                /* since ath10k_install_key() needs to sleep we can't hold
                 * data_lock all the time, so we try to remove the keys
                 * incrementally */
                spin_lock_bh(&ar->data_lock);
                i = 0;
                list_for_each_entry(peer, &ar->peers, list) {
                        for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
                                if (peer->keys[i] == key) {
                                        ether_addr_copy(addr, peer->addr);
                                        peer->keys[i] = NULL;
                                        break;
                                }
                        }

                        if (i < ARRAY_SIZE(peer->keys))
                                break;
                }
                spin_unlock_bh(&ar->data_lock);

                if (i == ARRAY_SIZE(peer->keys))
                        break;

                /* key flags are not required to delete the key */
                ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
                if (ret < 0 && first_errno == 0)
                        first_errno = ret;

                if (ret)
                        ath10k_warn(ar, "failed to remove key for %pM: %d\n",
                                    addr, ret);
        }

        return first_errno;
}

static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
                                         struct ieee80211_key_conf *key)
{
        struct ath10k *ar = arvif->ar;
        struct ath10k_peer *peer;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(peer, &ar->peers, list) {
                if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
                        continue;

                if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
                        continue;

                if (peer->keys[key->keyidx] == key)
                        continue;

                ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
                           arvif->vdev_id, key->keyidx);

                ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
                if (ret) {
                        ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
                                    arvif->vdev_id, peer->addr, ret);
                        return ret;
                }
        }

        return 0;
}

/*********************/
/* General utilities */
/*********************/

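/* Derive the WMI phy mode from a cfg80211 channel definition (band and
 * channel width).
 */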
static inline enum wmi_phy_mode
chan_to_phymode(const struct cfg80211_chan_def *chandef)
{
        enum wmi_phy_mode phymode = MODE_UNKNOWN;

        switch (chandef->chan->band) {
        case IEEE80211_BAND_2GHZ:
                switch (chandef->width) {
                case NL80211_CHAN_WIDTH_20_NOHT:
                        if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
                                phymode = MODE_11B;
                        else
                                phymode = MODE_11G;
                        break;
                case NL80211_CHAN_WIDTH_20:
                        phymode = MODE_11NG_HT20;
                        break;
                case NL80211_CHAN_WIDTH_40:
                        phymode = MODE_11NG_HT40;
                        break;
                case NL80211_CHAN_WIDTH_5:
                case NL80211_CHAN_WIDTH_10:
                case NL80211_CHAN_WIDTH_80:
                case NL80211_CHAN_WIDTH_80P80:
                case NL80211_CHAN_WIDTH_160:
                        phymode = MODE_UNKNOWN;
                        break;
                }
                break;
        case IEEE80211_BAND_5GHZ:
                switch (chandef->width) {
                case NL80211_CHAN_WIDTH_20_NOHT:
                        phymode = MODE_11A;
                        break;
                case NL80211_CHAN_WIDTH_20:
                        phymode = MODE_11NA_HT20;
                        break;
                case NL80211_CHAN_WIDTH_40:
                        phymode = MODE_11NA_HT40;
                        break;
                case NL80211_CHAN_WIDTH_80:
                        phymode = MODE_11AC_VHT80;
                        break;
                case NL80211_CHAN_WIDTH_5:
                case NL80211_CHAN_WIDTH_10:
                case NL80211_CHAN_WIDTH_80P80:
                case NL80211_CHAN_WIDTH_160:
                        phymode = MODE_UNKNOWN;
                        break;
                }
                break;
        default:
                break;
        }

        WARN_ON(phymode == MODE_UNKNOWN);
        return phymode;
}

static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
{
/*
 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
 *   0 for no restriction
 *   1 for 1/4 us
 *   2 for 1/2 us
 *   3 for 1 us
 *   4 for 2 us
 *   5 for 4 us
 *   6 for 8 us
 *   7 for 16 us
 */
        switch (mpdudensity) {
        case 0:
                return 0;
        case 1:
        case 2:
        case 3:
                /* Our lower layer calculations limit our precision to
                   1 microsecond */
                return 1;
        case 4:
                return 2;
        case 5:
                return 4;
        case 6:
                return 8;
        case 7:
                return 16;
        default:
                return 0;
        }
}

int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
                        struct cfg80211_chan_def *def)
{
        struct ieee80211_chanctx_conf *conf;

        rcu_read_lock();
        conf = rcu_dereference(vif->chanctx_conf);
        if (!conf) {
                rcu_read_unlock();
                return -ENOENT;
        }

        *def = conf->def;
        rcu_read_unlock();

        return 0;
}

static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
                                         struct ieee80211_chanctx_conf *conf,
                                         void *data)
{
        int *num = data;

        (*num)++;
}

static int ath10k_mac_num_chanctxs(struct ath10k *ar)
{
        int num = 0;

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_mac_num_chanctxs_iter,
                                            &num);

        return num;
}

static void
ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
                                struct ieee80211_chanctx_conf *conf,
                                void *data)
{
        struct cfg80211_chan_def **def = data;

        *def = &conf->def;
}

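/* Create a firmware peer entry for the given address on a vdev and bind it
 * to the mac80211 vif/sta pointers for later lookups.
 */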
static int ath10k_peer_create(struct ath10k *ar,
                              struct ieee80211_vif *vif,
                              struct ieee80211_sta *sta,
                              u32 vdev_id,
                              const u8 *addr,
                              enum wmi_peer_type peer_type)
{
        struct ath10k_vif *arvif;
        struct ath10k_peer *peer;
        int num_peers = 0;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        num_peers = ar->num_peers;

        /* Each vdev consumes a peer entry as well */
        list_for_each_entry(arvif, &ar->arvifs, list)
                num_peers++;

        if (num_peers >= ar->max_num_peers)
                return -ENOBUFS;

        ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
        if (ret) {
                ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
                            addr, vdev_id, ret);
                return ret;
        }

        ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
        if (ret) {
                ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
                            addr, vdev_id, ret);
                return ret;
        }

        spin_lock_bh(&ar->data_lock);

        peer = ath10k_peer_find(ar, vdev_id, addr);
        if (!peer) {
                ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
                            addr, vdev_id);
                ath10k_wmi_peer_delete(ar, vdev_id, addr);
                spin_unlock_bh(&ar->data_lock);
                return -ENOENT;
        }

        peer->vif = vif;
        peer->sta = sta;

        spin_unlock_bh(&ar->data_lock);

        ar->num_peers++;

        return 0;
}

static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
{
        struct ath10k *ar = arvif->ar;
        u32 param;
        int ret;

        param = ar->wmi.pdev_param->sta_kickout_th;
        ret = ath10k_wmi_pdev_set_param(ar, param,
                                        ATH10K_KICKOUT_THRESHOLD);
        if (ret) {
                ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MIN_IDLE);
        if (ret) {
                ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MAX_IDLE);
        if (ret) {
                ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
        if (ret) {
                ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        return 0;
}

static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
        struct ath10k *ar = arvif->ar;
        u32 vdev_param;

        vdev_param = ar->wmi.vdev_param->rts_threshold;
        return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}

static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
        if (ret)
                return ret;

        ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
        if (ret)
                return ret;

        ar->num_peers--;

        return 0;
}

static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
        struct ath10k_peer *peer, *tmp;
        int peer_id;

        lockdep_assert_held(&ar->conf_mutex);

        spin_lock_bh(&ar->data_lock);
        list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
                if (peer->vdev_id != vdev_id)
                        continue;

                ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
                            peer->addr, vdev_id);

                for_each_set_bit(peer_id, peer->peer_ids,
                                 ATH10K_MAX_NUM_PEER_IDS) {
                        ar->peer_map[peer_id] = NULL;
                }

                list_del(&peer->list);
                kfree(peer);
                ar->num_peers--;
        }
        spin_unlock_bh(&ar->data_lock);
}

static void ath10k_peer_cleanup_all(struct ath10k *ar)
{
        struct ath10k_peer *peer, *tmp;

        lockdep_assert_held(&ar->conf_mutex);

        spin_lock_bh(&ar->data_lock);
        list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
                list_del(&peer->list);
                kfree(peer);
        }
        spin_unlock_bh(&ar->data_lock);

        ar->num_peers = 0;
        ar->num_stations = 0;
}

static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
                                       struct ieee80211_sta *sta,
                                       enum wmi_tdls_peer_state state)
{
        int ret;
        struct wmi_tdls_peer_update_cmd_arg arg = {};
        struct wmi_tdls_peer_capab_arg cap = {};
        struct wmi_channel_arg chan_arg = {};

        lockdep_assert_held(&ar->conf_mutex);

        arg.vdev_id = vdev_id;
        arg.peer_state = state;
        ether_addr_copy(arg.addr, sta->addr);

        cap.peer_max_sp = sta->max_sp;
        cap.peer_uapsd_queues = sta->uapsd_queues;

        if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
            !sta->tdls_initiator)
                cap.is_peer_responder = 1;

        ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
        if (ret) {
                ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
                            arg.addr, vdev_id, ret);
                return ret;
        }

        return 0;
}

/************************/
/* Interface management */
/************************/

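/* Free the beacon currently queued for this vif, unmapping it first when it
 * was not sent from the preallocated beacon buffer.
 */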
void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
{
        struct ath10k *ar = arvif->ar;

        lockdep_assert_held(&ar->data_lock);

        if (!arvif->beacon)
                return;

        if (!arvif->beacon_buf)
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
                                 arvif->beacon->len, DMA_TO_DEVICE);

        if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
                    arvif->beacon_state != ATH10K_BEACON_SENT))
                return;

        dev_kfree_skb_any(arvif->beacon);

        arvif->beacon = NULL;
        arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
}

static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
{
        struct ath10k *ar = arvif->ar;

        lockdep_assert_held(&ar->data_lock);

        ath10k_mac_vif_beacon_free(arvif);

        if (arvif->beacon_buf) {
                dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
                                  arvif->beacon_buf, arvif->beacon_paddr);
                arvif->beacon_buf = NULL;
        }
}

static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
        unsigned long time_left;

        lockdep_assert_held(&ar->conf_mutex);

        if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
                return -ESHUTDOWN;

        time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
                                                ATH10K_VDEV_SETUP_TIMEOUT_HZ);
        if (time_left == 0)
                return -ETIMEDOUT;

        return 0;
}

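/* Start the monitor vdev on the channel of whichever channel context is
 * currently active; monitor mode piggybacks on an existing chanctx.
 */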
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
        struct cfg80211_chan_def *chandef = NULL;
        struct ieee80211_channel *channel = NULL;
        struct wmi_vdev_start_request_arg arg = {};
        int ret = 0;

        lockdep_assert_held(&ar->conf_mutex);

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_mac_get_any_chandef_iter,
                                            &chandef);
        if (WARN_ON_ONCE(!chandef))
                return -ENOENT;

        channel = chandef->chan;

        arg.vdev_id = vdev_id;
        arg.channel.freq = channel->center_freq;
        arg.channel.band_center_freq1 = chandef->center_freq1;

        /* TODO: set this up dynamically; what if we don't have any vifs? */
        arg.channel.mode = chan_to_phymode(chandef);
        arg.channel.chan_radar =
                        !!(channel->flags & IEEE80211_CHAN_RADAR);

        arg.channel.min_power = 0;
        arg.channel.max_power = channel->max_power * 2;
        arg.channel.max_reg_power = channel->max_reg_power * 2;
        arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;

        reinit_completion(&ar->vdev_setup_done);

        ret = ath10k_wmi_vdev_start(ar, &arg);
        if (ret) {
                ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
                            vdev_id, ret);
                return ret;
        }

        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
                ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
                            vdev_id, ret);
                return ret;
        }

        ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
        if (ret) {
                ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
                            vdev_id, ret);
                goto vdev_stop;
        }

        ar->monitor_vdev_id = vdev_id;

        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
                   ar->monitor_vdev_id);
        return 0;

vdev_stop:
        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
                ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
                            ar->monitor_vdev_id, ret);

        return ret;
}

static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
        int ret = 0;

        lockdep_assert_held(&ar->conf_mutex);

        ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
        if (ret)
                ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
                            ar->monitor_vdev_id, ret);

        reinit_completion(&ar->vdev_setup_done);

        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
                ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
                            ar->monitor_vdev_id, ret);

        ret = ath10k_vdev_setup_sync(ar);
        if (ret)
                ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
                            ar->monitor_vdev_id, ret);

        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
                   ar->monitor_vdev_id);
        return ret;
}

static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
        int bit, ret = 0;

        lockdep_assert_held(&ar->conf_mutex);

        if (ar->free_vdev_map == 0) {
                ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
                return -ENOMEM;
        }

        bit = __ffs64(ar->free_vdev_map);

        ar->monitor_vdev_id = bit;

        ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
                                     WMI_VDEV_TYPE_MONITOR,
                                     0, ar->mac_addr);
        if (ret) {
                ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
                            ar->monitor_vdev_id, ret);
                return ret;
        }

        ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
                   ar->monitor_vdev_id);

        return 0;
}

static int ath10k_monitor_vdev_delete(struct ath10k *ar)
{
        int ret = 0;

        lockdep_assert_held(&ar->conf_mutex);

        ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
        if (ret) {
                ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
                            ar->monitor_vdev_id, ret);
                return ret;
        }

        ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;

        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
                   ar->monitor_vdev_id);
        return ret;
}

static int ath10k_monitor_start(struct ath10k *ar)
{
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        ret = ath10k_monitor_vdev_create(ar);
        if (ret) {
                ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
                return ret;
        }

        ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
        if (ret) {
                ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
                ath10k_monitor_vdev_delete(ar);
                return ret;
        }

        ar->monitor_started = true;
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");

        return 0;
}

static int ath10k_monitor_stop(struct ath10k *ar)
{
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        ret = ath10k_monitor_vdev_stop(ar);
        if (ret) {
                ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
                return ret;
        }

        ret = ath10k_monitor_vdev_delete(ar);
        if (ret) {
                ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
                return ret;
        }

        ar->monitor_started = false;
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");

        return 0;
}

static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
{
        int num_ctx;

        /* At least one chanctx is required to derive a channel to start
         * monitor vdev on.
         */
        num_ctx = ath10k_mac_num_chanctxs(ar);
        if (num_ctx == 0)
                return false;

        /* If there's already an existing special monitor interface then don't
         * bother creating another monitor vdev.
         */
        if (ar->monitor_arvif)
                return false;

        return ar->monitor ||
               ar->filter_flags & FIF_OTHER_BSS ||
               test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}

static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
{
        int num_ctx;

        num_ctx = ath10k_mac_num_chanctxs(ar);

        /* FIXME: Current interface combinations and cfg80211/mac80211 code
         * shouldn't allow this but make sure to prevent handling the following
         * case anyway since multi-channel DFS hasn't been tested at all.
         */
        if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
                return false;

        return true;
}

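/* Reconcile monitor vdev state with current needs: start it when monitor
 * mode, FIF_OTHER_BSS filtering or CAC requires it, stop it otherwise.
 */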
static int ath10k_monitor_recalc(struct ath10k *ar)
{
        bool needed;
        bool allowed;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        needed = ath10k_mac_monitor_vdev_is_needed(ar);
        allowed = ath10k_mac_monitor_vdev_is_allowed(ar);

        ath10k_dbg(ar, ATH10K_DBG_MAC,
                   "mac monitor recalc started? %d needed? %d allowed? %d\n",
                   ar->monitor_started, needed, allowed);

        if (WARN_ON(needed && !allowed)) {
                if (ar->monitor_started) {
                        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");

                        ret = ath10k_monitor_stop(ar);
                        if (ret)
                                ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
                                            ret);
                                /* not serious */
                }

                return -EPERM;
        }

        if (needed == ar->monitor_started)
                return 0;

        if (needed)
                return ath10k_monitor_start(ar);
        else
                return ath10k_monitor_stop(ar);
}

static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
{
        struct ath10k *ar = arvif->ar;
        u32 vdev_param, rts_cts = 0;

        lockdep_assert_held(&ar->conf_mutex);

        vdev_param = ar->wmi.vdev_param->enable_rtscts;

        rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);

        if (arvif->num_legacy_stations > 0)
                rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
                              WMI_RTSCTS_PROFILE);
        else
                rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
                              WMI_RTSCTS_PROFILE);

        return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                         rts_cts);
}

static int ath10k_start_cac(struct ath10k *ar)
{
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);

        ret = ath10k_monitor_recalc(ar);
        if (ret) {
                ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
                clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
                return ret;
        }

        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
                   ar->monitor_vdev_id);

        return 0;
}

static int ath10k_stop_cac(struct ath10k *ar)
{
        lockdep_assert_held(&ar->conf_mutex);

        /* CAC is not running - do nothing */
        if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
                return 0;

        clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
        ath10k_monitor_stop(ar);

        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");

        return 0;
}

static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
                                      struct ieee80211_chanctx_conf *conf,
                                      void *data)
{
        bool *ret = data;

        if (!*ret && conf->radar_enabled)
                *ret = true;
}

static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
{
        bool has_radar = false;

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_mac_has_radar_iter,
                                            &has_radar);

        return has_radar;
}

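/* Restart CAC whenever radar detection is required on the current channel
 * and no vdev has been started yet. A CAC start failure is reported as if
 * radar had been detected so the channel becomes unavailable.
 */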
static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        ath10k_stop_cac(ar);

        if (!ath10k_mac_has_radar_enabled(ar))
                return;

        if (ar->num_started_vdevs > 0)
                return;

        ret = ath10k_start_cac(ar);
        if (ret) {
                /*
                 * Not possible to start CAC on current channel so starting
                 * radiation is not allowed, make this channel DFS_UNAVAILABLE
                 * by indicating that radar was detected.
                 */
                ath10k_warn(ar, "failed to start CAC: %d\n", ret);
                ieee80211_radar_detected(ar->hw);
        }
}

static int ath10k_vdev_stop(struct ath10k_vif *arvif)
{
        struct ath10k *ar = arvif->ar;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        reinit_completion(&ar->vdev_setup_done);

        ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
        if (ret) {
                ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
                ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        WARN_ON(ar->num_started_vdevs == 0);

        if (ar->num_started_vdevs != 0) {
                ar->num_started_vdevs--;
                ath10k_recalc_radar_detection(ar);
        }

        return ret;
}

static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
                                     const struct cfg80211_chan_def *chandef,
                                     bool restart)
{
        struct ath10k *ar = arvif->ar;
        struct wmi_vdev_start_request_arg arg = {};
        int ret = 0;

        lockdep_assert_held(&ar->conf_mutex);

        reinit_completion(&ar->vdev_setup_done);

        arg.vdev_id = arvif->vdev_id;
        arg.dtim_period = arvif->dtim_period;
        arg.bcn_intval = arvif->beacon_interval;

        arg.channel.freq = chandef->chan->center_freq;
        arg.channel.band_center_freq1 = chandef->center_freq1;
        arg.channel.mode = chan_to_phymode(chandef);

        arg.channel.min_power = 0;
        arg.channel.max_power = chandef->chan->max_power * 2;
        arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
        arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;

        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                arg.ssid = arvif->u.ap.ssid;
                arg.ssid_len = arvif->u.ap.ssid_len;
                arg.hidden_ssid = arvif->u.ap.hidden_ssid;

                /* For now allow DFS for AP mode */
                arg.channel.chan_radar =
                        !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
        } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
                arg.ssid = arvif->vif->bss_conf.ssid;
                arg.ssid_len = arvif->vif->bss_conf.ssid_len;
        }

        ath10k_dbg(ar, ATH10K_DBG_MAC,
                   "mac vdev %d start center_freq %d phymode %s\n",
                   arg.vdev_id, arg.channel.freq,
                   ath10k_wmi_phymode_str(arg.channel.mode));

        if (restart)
                ret = ath10k_wmi_vdev_restart(ar, &arg);
        else
                ret = ath10k_wmi_vdev_start(ar, &arg);

        if (ret) {
                ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
                            arg.vdev_id, ret);
                return ret;
        }

        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
                ath10k_warn(ar,
                            "failed to synchronize setup for vdev %i restart %d: %d\n",
                            arg.vdev_id, restart, ret);
                return ret;
        }

        ar->num_started_vdevs++;
        ath10k_recalc_radar_detection(ar);

        return ret;
}

static int ath10k_vdev_start(struct ath10k_vif *arvif,
                             const struct cfg80211_chan_def *def)
{
        return ath10k_vdev_start_restart(arvif, def, false);
}

static int ath10k_vdev_restart(struct ath10k_vif *arvif,
                               const struct cfg80211_chan_def *def)
{
        return ath10k_vdev_start_restart(arvif, def, true);
}

static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
                                       struct sk_buff *bcn)
{
        struct ath10k *ar = arvif->ar;
        struct ieee80211_mgmt *mgmt;
        const u8 *p2p_ie;
        int ret;

        if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
                return 0;

        mgmt = (void *)bcn->data;
        p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
                                         mgmt->u.beacon.variable,
                                         bcn->len - (mgmt->u.beacon.variable -
                                                     bcn->data));
        if (!p2p_ie)
                return -ENOENT;

        ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
        if (ret) {
                ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        return 0;
}

static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
                                       u8 oui_type, size_t ie_offset)
{
        size_t len;
        const u8 *next;
        const u8 *end;
        u8 *ie;

        if (WARN_ON(skb->len < ie_offset))
                return -EINVAL;

        ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
                                           skb->data + ie_offset,
                                           skb->len - ie_offset);
        if (!ie)
                return -ENOENT;

        len = ie[1] + 2;
        end = skb->data + skb->len;
        next = ie + len;

        if (WARN_ON(next > end))
                return -EINVAL;

        memmove(ie, next, end - next);
        skb_trim(skb, skb->len - len);

        return 0;
}

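/* Submit a beacon template to the firmware for offloaded beaconing (AP and
 * IBSS vdevs only), stripping the P2P IE which the firmware inserts on its
 * own as configured via the p2p go bcn ie command.
 */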
static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
{
        struct ath10k *ar = arvif->ar;
        struct ieee80211_hw *hw = ar->hw;
        struct ieee80211_vif *vif = arvif->vif;
        struct ieee80211_mutable_offsets offs = {};
        struct sk_buff *bcn;
        int ret;

        if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
                return 0;

        if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
            arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
                return 0;

        bcn = ieee80211_beacon_get_template(hw, vif, &offs);
        if (!bcn) {
                ath10k_warn(ar, "failed to get beacon template from mac80211\n");
                return -EPERM;
        }

        ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
        if (ret) {
                ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
                kfree_skb(bcn);
                return ret;
        }

        /* P2P IE is inserted by firmware automatically (as configured above)
         * so remove it from the base beacon template to avoid duplicate P2P
         * IEs in beacon frames.
         */
        ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
                                    offsetof(struct ieee80211_mgmt,
                                             u.beacon.variable));

        ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
                                  0, NULL, 0);
        kfree_skb(bcn);

        if (ret) {
                ath10k_warn(ar, "failed to submit beacon template command: %d\n",
                            ret);
                return ret;
        }

        return 0;
}

static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
{
        struct ath10k *ar = arvif->ar;
        struct ieee80211_hw *hw = ar->hw;
        struct ieee80211_vif *vif = arvif->vif;
        struct sk_buff *prb;
        int ret;

        if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
                return 0;

        if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
                return 0;

        prb = ieee80211_proberesp_get(hw, vif);
        if (!prb) {
                ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
                return -EPERM;
        }

        ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
        kfree_skb(prb);

        if (ret) {
                ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
                            ret);
                return ret;
        }

        return 0;
}

Michal Kazior500ff9f2015-03-31 10:26:21 +00001521static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1522{
1523 struct ath10k *ar = arvif->ar;
1524 struct cfg80211_chan_def def;
1525 int ret;
1526
1527 /* When originally vdev is started during assign_vif_chanctx() some
1528 * information is missing, notably SSID. Firmware revisions with beacon
1529 * offloading require the SSID to be provided during vdev (re)start to
1530 * handle hidden SSID properly.
1531 *
1532 * Vdev restart must be done after vdev has been both started and
1533 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1534 * deliver vdev restart response event causing timeouts during vdev
1535 * syncing in ath10k.
1536 *
1537 * Note: The vdev down/up and template reinstallation could be skipped
1538 * since only wmi-tlv firmware are known to have beacon offload and
1539 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1540 * response delivery. It's probably more robust to keep it as is.
1541 */
1542 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1543 return 0;
1544
1545 if (WARN_ON(!arvif->is_started))
1546 return -EINVAL;
1547
1548 if (WARN_ON(!arvif->is_up))
1549 return -EINVAL;
1550
1551 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1552 return -EINVAL;
1553
1554 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1555 if (ret) {
1556 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1557 arvif->vdev_id, ret);
1558 return ret;
1559 }
1560
1561 /* Vdev down resets the beacon & presp templates. Reinstall them,
1562 * otherwise firmware will crash upon vdev up.
1563 */
1564
1565 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1566 if (ret) {
1567 ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1568 return ret;
1569 }
1570
1571 ret = ath10k_mac_setup_prb_tmpl(arvif);
1572 if (ret) {
1573 ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1574 return ret;
1575 }
1576
1577 ret = ath10k_vdev_restart(arvif, &def);
1578 if (ret) {
1579 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1580 arvif->vdev_id, ret);
1581 return ret;
1582 }
1583
1584 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1585 arvif->bssid);
1586 if (ret) {
1587 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1588 arvif->vdev_id, ret);
1589 return ret;
1590 }
1591
1592 return 0;
1593}
1594
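/* Start or stop beaconing on an AP/IBSS vdev according to mac80211's
 * enable_beacon flag: on disable the vdev is brought down and its beacon
 * buffers are freed, on enable the vdev is brought up with the new bssid
 * and the hidden SSID fixup is applied.
 */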
Kalle Valo5e3dd152013-06-12 20:52:10 +03001595static void ath10k_control_beaconing(struct ath10k_vif *arvif,
Kalle Valo5b07e072014-09-14 12:50:06 +03001596 struct ieee80211_bss_conf *info)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001597{
Michal Kazior7aa7a722014-08-25 12:09:38 +02001598 struct ath10k *ar = arvif->ar;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001599 int ret = 0;
1600
Michal Kazior548db542013-07-05 16:15:15 +03001601 lockdep_assert_held(&arvif->ar->conf_mutex);
1602
Kalle Valo5e3dd152013-06-12 20:52:10 +03001603 if (!info->enable_beacon) {
Michal Kazior500ff9f2015-03-31 10:26:21 +00001604 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1605 if (ret)
1606 ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1607 arvif->vdev_id, ret);
Michal Kaziorc930f742014-01-23 11:38:25 +01001608
Michal Kaziorc930f742014-01-23 11:38:25 +01001609 arvif->is_up = false;
1610
Michal Kazior748afc42014-01-23 12:48:21 +01001611 spin_lock_bh(&arvif->ar->data_lock);
Michal Kazior64badcb2014-09-18 11:18:02 +03001612 ath10k_mac_vif_beacon_free(arvif);
Michal Kazior748afc42014-01-23 12:48:21 +01001613 spin_unlock_bh(&arvif->ar->data_lock);
1614
Kalle Valo5e3dd152013-06-12 20:52:10 +03001615 return;
1616 }
1617
1618 arvif->tx_seq_no = 0x1000;
1619
Michal Kaziorc930f742014-01-23 11:38:25 +01001620 arvif->aid = 0;
Kalle Valob25f32c2014-09-14 12:50:49 +03001621 ether_addr_copy(arvif->bssid, info->bssid);
Michal Kaziorc930f742014-01-23 11:38:25 +01001622
1623 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1624 arvif->bssid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001625 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001626 ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02001627 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001628 return;
1629 }
Michal Kaziorc930f742014-01-23 11:38:25 +01001630
Michal Kaziorc930f742014-01-23 11:38:25 +01001631 arvif->is_up = true;
1632
Michal Kazior500ff9f2015-03-31 10:26:21 +00001633 ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1634 if (ret) {
1635 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1636 arvif->vdev_id, ret);
1637 return;
1638 }
1639
Michal Kazior7aa7a722014-08-25 12:09:38 +02001640 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001641}
1642
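/* Handle IBSS join/leave: clear the cached bssid when leaving and program
 * the default ATIM window when joined.
 */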
1643static void ath10k_control_ibss(struct ath10k_vif *arvif,
1644 struct ieee80211_bss_conf *info,
1645 const u8 self_peer[ETH_ALEN])
1646{
Michal Kazior7aa7a722014-08-25 12:09:38 +02001647 struct ath10k *ar = arvif->ar;
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02001648 u32 vdev_param;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001649 int ret = 0;
1650
Michal Kazior548db542013-07-05 16:15:15 +03001651 lockdep_assert_held(&arvif->ar->conf_mutex);
1652
Kalle Valo5e3dd152013-06-12 20:52:10 +03001653 if (!info->ibss_joined) {
Michal Kaziorc930f742014-01-23 11:38:25 +01001654 if (is_zero_ether_addr(arvif->bssid))
Kalle Valo5e3dd152013-06-12 20:52:10 +03001655 return;
1656
Joe Perches93803b32015-03-02 19:54:49 -08001657 eth_zero_addr(arvif->bssid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001658
1659 return;
1660 }
1661
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02001662 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1663 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03001664 ATH10K_DEFAULT_ATIM);
1665 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02001666 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001667 arvif->vdev_id, ret);
1668}
1669
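/* Select the STA powersave TX wake threshold: never wake when U-APSD is
 * configured on the vif, always wake otherwise.
 */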
Michal Kazior9f9b5742014-12-12 12:41:36 +01001670static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1671{
1672 struct ath10k *ar = arvif->ar;
1673 u32 param;
1674 u32 value;
1675 int ret;
1676
1677 lockdep_assert_held(&arvif->ar->conf_mutex);
1678
1679 if (arvif->u.sta.uapsd)
1680 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1681 else
1682 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1683
1684 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1685 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1686 if (ret) {
1687 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1688 value, arvif->vdev_id, ret);
1689 return ret;
1690 }
1691
1692 return 0;
1693}
1694
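/* Select the PS-Poll count to match the U-APSD state, mirroring the wake
 * threshold selection above.
 */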
1695static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1696{
1697 struct ath10k *ar = arvif->ar;
1698 u32 param;
1699 u32 value;
1700 int ret;
1701
1702 lockdep_assert_held(&arvif->ar->conf_mutex);
1703
1704 if (arvif->u.sta.uapsd)
1705 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1706 else
1707 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1708
1709 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1710 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1711 param, value);
1712 if (ret) {
1713 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1714 value, arvif->vdev_id, ret);
1715 return ret;
1716 }
1717
1718 return 0;
1719}
1720
Michal Kazior424f2632015-07-09 13:08:35 +02001721static int ath10k_mac_num_vifs_started(struct ath10k *ar)
Michal Kaziorcffb41f2015-02-13 13:30:16 +01001722{
1723 struct ath10k_vif *arvif;
1724 int num = 0;
1725
1726 lockdep_assert_held(&ar->conf_mutex);
1727
1728 list_for_each_entry(arvif, &ar->arvifs, list)
Michal Kazior424f2632015-07-09 13:08:35 +02001729 if (arvif->is_started)
Michal Kaziorcffb41f2015-02-13 13:30:16 +01001730 num++;
1731
1732 return num;
1733}
1734
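/* Program station powersave for a STA vdev: force-enable PS on vdevs that
 * are not started, honor arvif->ps otherwise (unless more than one vdev is
 * started and firmware lacks multi-vif PS support), and derive the
 * inactivity timeout from dynamic_ps_timeout or the beacon interval.
 */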
Michal Kaziorad088bf2013-10-16 15:44:46 +03001735static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001736{
Michal Kaziorad088bf2013-10-16 15:44:46 +03001737 struct ath10k *ar = arvif->ar;
Michal Kazior526549a2014-12-12 12:41:37 +01001738 struct ieee80211_vif *vif = arvif->vif;
Michal Kaziorad088bf2013-10-16 15:44:46 +03001739 struct ieee80211_conf *conf = &ar->hw->conf;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001740 enum wmi_sta_powersave_param param;
1741 enum wmi_sta_ps_mode psmode;
1742 int ret;
Michal Kazior526549a2014-12-12 12:41:37 +01001743 int ps_timeout;
Michal Kaziorcffb41f2015-02-13 13:30:16 +01001744 bool enable_ps;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001745
Michal Kazior548db542013-07-05 16:15:15 +03001746 lockdep_assert_held(&arvif->ar->conf_mutex);
1747
Michal Kaziorad088bf2013-10-16 15:44:46 +03001748 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1749 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001750
Michal Kaziorcffb41f2015-02-13 13:30:16 +01001751 enable_ps = arvif->ps;
1752
Michal Kazior424f2632015-07-09 13:08:35 +02001753 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
Michal Kaziorcffb41f2015-02-13 13:30:16 +01001754 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1755 ar->fw_features)) {
1756 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1757 arvif->vdev_id);
1758 enable_ps = false;
1759 }
1760
Janusz Dziedzic917826b2015-05-18 09:38:17 +00001761 if (!arvif->is_started) {
1762 /* mac80211 can update vif powersave state while disconnected.
1763 * Firmware doesn't behave nicely and consumes more power than
1764 * necessary if PS is disabled on a non-started vdev. Hence
1765 * force-enable PS for non-running vdevs.
1766 */
1767 psmode = WMI_STA_PS_MODE_ENABLED;
1768 } else if (enable_ps) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001769 psmode = WMI_STA_PS_MODE_ENABLED;
1770 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1771
Michal Kazior526549a2014-12-12 12:41:37 +01001772 ps_timeout = conf->dynamic_ps_timeout;
1773 if (ps_timeout == 0) {
1774 /* Firmware doesn't like 0 */
1775 ps_timeout = ieee80211_tu_to_usec(
1776 vif->bss_conf.beacon_int) / 1000;
1777 }
1778
Michal Kaziorad088bf2013-10-16 15:44:46 +03001779 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
Michal Kazior526549a2014-12-12 12:41:37 +01001780 ps_timeout);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001781 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001782 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02001783 arvif->vdev_id, ret);
Michal Kaziorad088bf2013-10-16 15:44:46 +03001784 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001785 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03001786 } else {
1787 psmode = WMI_STA_PS_MODE_DISABLED;
1788 }
1789
Michal Kazior7aa7a722014-08-25 12:09:38 +02001790 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03001791 arvif->vdev_id, psmode ? "enable" : "disable");
1792
Michal Kaziorad088bf2013-10-16 15:44:46 +03001793 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1794 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001795 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02001796 psmode, arvif->vdev_id, ret);
Michal Kaziorad088bf2013-10-16 15:44:46 +03001797 return ret;
1798 }
1799
1800 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001801}
1802
Michal Kazior46725b152015-01-28 09:57:49 +02001803static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1804{
1805 struct ath10k *ar = arvif->ar;
1806 struct wmi_sta_keepalive_arg arg = {};
1807 int ret;
1808
1809 lockdep_assert_held(&arvif->ar->conf_mutex);
1810
1811 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1812 return 0;
1813
1814 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1815 return 0;
1816
1817 /* Some firmware revisions have a bug and ignore the `enabled` field.
1818 * Instead use the interval to disable the keepalive.
1819 */
1820 arg.vdev_id = arvif->vdev_id;
1821 arg.enabled = 1;
1822 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1823 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1824
1825 ret = ath10k_wmi_sta_keepalive(ar, &arg);
1826 if (ret) {
1827 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1828 arvif->vdev_id, ret);
1829 return ret;
1830 }
1831
1832 return 0;
1833}
1834
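/* Advance the CSA countdown on a beacon-offload AP vdev: update the
 * counter and reinstall beacon/probe response templates until the
 * countdown completes, then let mac80211 finish the channel switch.
 */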
Michal Kazior81a9a172015-03-05 16:02:17 +02001835static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1836{
1837 struct ath10k *ar = arvif->ar;
1838 struct ieee80211_vif *vif = arvif->vif;
1839 int ret;
1840
Michal Kazior8513d952015-03-09 14:19:24 +01001841 lockdep_assert_held(&arvif->ar->conf_mutex);
1842
1843 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1844 return;
1845
Michal Kazior81a9a172015-03-05 16:02:17 +02001846 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1847 return;
1848
1849 if (!vif->csa_active)
1850 return;
1851
1852 if (!arvif->is_up)
1853 return;
1854
1855 if (!ieee80211_csa_is_complete(vif)) {
1856 ieee80211_csa_update_counter(vif);
1857
1858 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1859 if (ret)
1860 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1861 ret);
1862
1863 ret = ath10k_mac_setup_prb_tmpl(arvif);
1864 if (ret)
1865 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1866 ret);
1867 } else {
1868 ieee80211_csa_finish(vif);
1869 }
1870}
1871
1872static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1873{
1874 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1875 ap_csa_work);
1876 struct ath10k *ar = arvif->ar;
1877
1878 mutex_lock(&ar->conf_mutex);
1879 ath10k_mac_vif_ap_csa_count_down(arvif);
1880 mutex_unlock(&ar->conf_mutex);
1881}
1882
Michal Kaziorcc9904e2015-03-10 16:22:01 +02001883static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
1884 struct ieee80211_vif *vif)
1885{
1886 struct sk_buff *skb = data;
1887 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1888 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1889
1890 if (vif->type != NL80211_IFTYPE_STATION)
1891 return;
1892
1893 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
1894 return;
1895
1896 cancel_delayed_work(&arvif->connection_loss_work);
1897}
1898
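/* Called for received beacons: cancel any pending connection loss work on
 * the STA vif associated with the transmitting BSS.
 */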
1899void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
1900{
1901 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1902 IEEE80211_IFACE_ITER_NORMAL,
1903 ath10k_mac_handle_beacon_iter,
1904 skb);
1905}
1906
1907static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
1908 struct ieee80211_vif *vif)
1909{
1910 u32 *vdev_id = data;
1911 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1912 struct ath10k *ar = arvif->ar;
1913 struct ieee80211_hw *hw = ar->hw;
1914
1915 if (arvif->vdev_id != *vdev_id)
1916 return;
1917
1918 if (!arvif->is_up)
1919 return;
1920
1921 ieee80211_beacon_loss(vif);
1922
1923 /* Firmware doesn't report beacon loss events repeatedly. If AP probe
1924 * (done by mac80211) succeeds but beacons do not resume then it
1925 * doesn't make sense to continue operation. Queue connection loss work
1926 * which can be cancelled when beacon is received.
1927 */
1928 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
1929 ATH10K_CONNECTION_LOSS_HZ);
1930}
1931
1932void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
1933{
1934 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1935 IEEE80211_IFACE_ITER_NORMAL,
1936 ath10k_mac_handle_beacon_miss_iter,
1937 &vdev_id);
1938}
1939
1940static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
1941{
1942 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1943 connection_loss_work.work);
1944 struct ieee80211_vif *vif = arvif->vif;
1945
1946 if (!arvif->is_up)
1947 return;
1948
1949 ieee80211_connection_loss(vif);
1950}
1951
Kalle Valo5e3dd152013-06-12 20:52:10 +03001952/**********************/
1953/* Station management */
1954/**********************/
1955
Michal Kazior590922a2014-10-21 10:10:29 +03001956static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
1957 struct ieee80211_vif *vif)
1958{
1959 /* Some firmware revisions have unstable STA powersave when listen
1960 * interval is set too high (e.g. 5). The symptom is that firmware does
1961 * not generate NullFunc frames properly even if buffered frames have been
1962 * indicated in Beacon TIM. Firmware would seldom wake up to pull
1963 * buffered frames. Often pinging the device from AP would simply fail.
1964 *
1965 * As a workaround set it to 1.
1966 */
1967 if (vif->type == NL80211_IFTYPE_STATION)
1968 return 1;
1969
1970 return ar->hw->conf.listen_interval;
1971}
1972
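/* Fill in the basic peer assoc parameters: peer address, vdev id, AID
 * (taken from bss_conf for STA vifs, from the sta entry otherwise), the
 * auth flag and the listen interval.
 */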
Kalle Valo5e3dd152013-06-12 20:52:10 +03001973static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
Michal Kazior590922a2014-10-21 10:10:29 +03001974 struct ieee80211_vif *vif,
Kalle Valo5e3dd152013-06-12 20:52:10 +03001975 struct ieee80211_sta *sta,
Kalle Valo5e3dd152013-06-12 20:52:10 +03001976 struct wmi_peer_assoc_complete_arg *arg)
1977{
Michal Kazior590922a2014-10-21 10:10:29 +03001978 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kaziorc51880e2015-03-30 09:51:57 +03001979 u32 aid;
Michal Kazior590922a2014-10-21 10:10:29 +03001980
Michal Kazior548db542013-07-05 16:15:15 +03001981 lockdep_assert_held(&ar->conf_mutex);
1982
Michal Kaziorc51880e2015-03-30 09:51:57 +03001983 if (vif->type == NL80211_IFTYPE_STATION)
1984 aid = vif->bss_conf.aid;
1985 else
1986 aid = sta->aid;
1987
Kalle Valob25f32c2014-09-14 12:50:49 +03001988 ether_addr_copy(arg->addr, sta->addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001989 arg->vdev_id = arvif->vdev_id;
Michal Kaziorc51880e2015-03-30 09:51:57 +03001990 arg->peer_aid = aid;
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02001991 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
Michal Kazior590922a2014-10-21 10:10:29 +03001992 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001993 arg->peer_num_spatial_streams = 1;
Michal Kazior590922a2014-10-21 10:10:29 +03001994 arg->peer_caps = vif->bss_conf.assoc_capability;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001995}
1996
1997static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
Michal Kazior590922a2014-10-21 10:10:29 +03001998 struct ieee80211_vif *vif,
Tamizh chelvam90eceb32015-10-29 14:27:42 +02001999 struct ieee80211_sta *sta,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002000 struct wmi_peer_assoc_complete_arg *arg)
2001{
Kalle Valo5e3dd152013-06-12 20:52:10 +03002002 struct ieee80211_bss_conf *info = &vif->bss_conf;
Michal Kazior500ff9f2015-03-31 10:26:21 +00002003 struct cfg80211_chan_def def;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002004 struct cfg80211_bss *bss;
2005 const u8 *rsnie = NULL;
2006 const u8 *wpaie = NULL;
2007
Michal Kazior548db542013-07-05 16:15:15 +03002008 lockdep_assert_held(&ar->conf_mutex);
2009
Michal Kazior500ff9f2015-03-31 10:26:21 +00002010 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2011 return;
2012
2013 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2014 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002015 if (bss) {
2016 const struct cfg80211_bss_ies *ies;
2017
2018 rcu_read_lock();
2019 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2020
2021 ies = rcu_dereference(bss->ies);
2022
2023 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
Kalle Valo5b07e072014-09-14 12:50:06 +03002024 WLAN_OUI_TYPE_MICROSOFT_WPA,
2025 ies->data,
2026 ies->len);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002027 rcu_read_unlock();
2028 cfg80211_put_bss(ar->hw->wiphy, bss);
2029 }
2030
2031 /* FIXME: is basing this on the RSN IE/WPA IE actually a correct idea? */
2032 if (rsnie || wpaie) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002033 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002034 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002035 }
2036
2037 if (wpaie) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002038 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002039 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002040 }
Tamizh chelvam90eceb32015-10-29 14:27:42 +02002041
2042 if (sta->mfp &&
2043 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, ar->fw_features)) {
2044 arg->peer_flags |= ar->wmi.peer_flags->pmf;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002045 }
2046}
2047
2048static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
Michal Kazior500ff9f2015-03-31 10:26:21 +00002049 struct ieee80211_vif *vif,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002050 struct ieee80211_sta *sta,
2051 struct wmi_peer_assoc_complete_arg *arg)
2052{
Michal Kazior45c9abc2015-04-21 20:42:58 +03002053 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002054 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
Michal Kazior500ff9f2015-03-31 10:26:21 +00002055 struct cfg80211_chan_def def;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002056 const struct ieee80211_supported_band *sband;
2057 const struct ieee80211_rate *rates;
Michal Kazior45c9abc2015-04-21 20:42:58 +03002058 enum ieee80211_band band;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002059 u32 ratemask;
Michal Kazior486017c2015-03-30 09:51:54 +03002060 u8 rate;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002061 int i;
2062
Michal Kazior548db542013-07-05 16:15:15 +03002063 lockdep_assert_held(&ar->conf_mutex);
2064
Michal Kazior500ff9f2015-03-31 10:26:21 +00002065 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2066 return;
2067
Michal Kazior45c9abc2015-04-21 20:42:58 +03002068 band = def.chan->band;
2069 sband = ar->hw->wiphy->bands[band];
2070 ratemask = sta->supp_rates[band];
2071 ratemask &= arvif->bitrate_mask.control[band].legacy;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002072 rates = sband->bitrates;
2073
2074 rateset->num_rates = 0;
2075
2076 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2077 if (!(ratemask & 1))
2078 continue;
2079
Michal Kazior486017c2015-03-30 09:51:54 +03002080 rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2081 rateset->rates[rateset->num_rates] = rate;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002082 rateset->num_rates++;
2083 }
2084}
2085
Michal Kazior45c9abc2015-04-21 20:42:58 +03002086static bool
2087ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2088{
2089 int nss;
2090
2091 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2092 if (ht_mcs_mask[nss])
2093 return false;
2094
2095 return true;
2096}
2097
2098static bool
2099ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2100{
2101 int nss;
2102
2103 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2104 if (vht_mcs_mask[nss])
2105 return false;
2106
2107 return true;
2108}
2109
Kalle Valo5e3dd152013-06-12 20:52:10 +03002110static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
Michal Kazior45c9abc2015-04-21 20:42:58 +03002111 struct ieee80211_vif *vif,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002112 struct ieee80211_sta *sta,
2113 struct wmi_peer_assoc_complete_arg *arg)
2114{
2115 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
Michal Kazior45c9abc2015-04-21 20:42:58 +03002116 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2117 struct cfg80211_chan_def def;
2118 enum ieee80211_band band;
2119 const u8 *ht_mcs_mask;
2120 const u16 *vht_mcs_mask;
Vivek Natarajan72f8cef2015-10-06 15:19:34 +03002121 int i, n;
2122 u8 max_nss;
Kalle Valoaf762c02014-09-14 12:50:17 +03002123 u32 stbc;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002124
Michal Kazior548db542013-07-05 16:15:15 +03002125 lockdep_assert_held(&ar->conf_mutex);
2126
Michal Kazior45c9abc2015-04-21 20:42:58 +03002127 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2128 return;
2129
Kalle Valo5e3dd152013-06-12 20:52:10 +03002130 if (!ht_cap->ht_supported)
2131 return;
2132
Michal Kazior45c9abc2015-04-21 20:42:58 +03002133 band = def.chan->band;
2134 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2135 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2136
2137 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2138 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2139 return;
2140
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002141 arg->peer_flags |= ar->wmi.peer_flags->ht;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002142 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2143 ht_cap->ampdu_factor)) - 1;
2144
2145 arg->peer_mpdu_density =
2146 ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2147
2148 arg->peer_ht_caps = ht_cap->cap;
2149 arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2150
2151 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002152 arg->peer_flags |= ar->wmi.peer_flags->ldbc;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002153
2154 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002155 arg->peer_flags |= ar->wmi.peer_flags->bw40;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002156 arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2157 }
2158
Michal Kazior45c9abc2015-04-21 20:42:58 +03002159 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2160 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2161 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002162
Michal Kazior45c9abc2015-04-21 20:42:58 +03002163 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2164 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2165 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002166
2167 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2168 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002169 arg->peer_flags |= ar->wmi.peer_flags->stbc;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002170 }
2171
2172 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002173 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2174 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2175 stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2176 arg->peer_rate_caps |= stbc;
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002177 arg->peer_flags |= ar->wmi.peer_flags->stbc;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002178 }
2179
Kalle Valo5e3dd152013-06-12 20:52:10 +03002180 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2181 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2182 else if (ht_cap->mcs.rx_mask[1])
2183 arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2184
Michal Kazior45c9abc2015-04-21 20:42:58 +03002185 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2186 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2187 (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2188 max_nss = (i / 8) + 1;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002189 arg->peer_ht_rates.rates[n++] = i;
Michal Kazior45c9abc2015-04-21 20:42:58 +03002190 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002191
Bartosz Markowskifd71f802014-02-10 13:12:55 +01002192 /*
2193 * This is a workaround for HT-enabled STAs which break the spec
2194 * and have no HT capabilities RX mask (no HT RX MCS map).
2195 *
2196 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2197 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2198 *
2199 * Firmware asserts if such situation occurs.
2200 */
2201 if (n == 0) {
2202 arg->peer_ht_rates.num_rates = 8;
2203 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2204 arg->peer_ht_rates.rates[i] = i;
2205 } else {
2206 arg->peer_ht_rates.num_rates = n;
Vivek Natarajan72f8cef2015-10-06 15:19:34 +03002207 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
Bartosz Markowskifd71f802014-02-10 13:12:55 +01002208 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002209
Michal Kazior7aa7a722014-08-25 12:09:38 +02002210 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03002211 arg->addr,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002212 arg->peer_ht_rates.num_rates,
2213 arg->peer_num_spatial_streams);
2214}
2215
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002216static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2217 struct ath10k_vif *arvif,
2218 struct ieee80211_sta *sta)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002219{
2220 u32 uapsd = 0;
2221 u32 max_sp = 0;
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002222 int ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002223
Michal Kazior548db542013-07-05 16:15:15 +03002224 lockdep_assert_held(&ar->conf_mutex);
2225
Kalle Valo5e3dd152013-06-12 20:52:10 +03002226 if (sta->wme && sta->uapsd_queues) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002227 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002228 sta->uapsd_queues, sta->max_sp);
2229
Kalle Valo5e3dd152013-06-12 20:52:10 +03002230 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2231 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2232 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2233 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2234 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2235 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2236 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2237 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2238 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2239 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2240 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2241 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2242
Kalle Valo5e3dd152013-06-12 20:52:10 +03002243 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2244 max_sp = sta->max_sp;
2245
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002246 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2247 sta->addr,
2248 WMI_AP_PS_PEER_PARAM_UAPSD,
2249 uapsd);
2250 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002251 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002252 arvif->vdev_id, ret);
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002253 return ret;
2254 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002255
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002256 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2257 sta->addr,
2258 WMI_AP_PS_PEER_PARAM_MAX_SP,
2259 max_sp);
2260 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002261 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002262 arvif->vdev_id, ret);
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002263 return ret;
2264 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002265
2266 /* TODO: set this up based on the STA listen interval and beacon
2267 * interval. sta->listen_interval is not known yet (a mac80211
2268 * patch is required), so use 10 seconds for now.
2269 */
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002270 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
Kalle Valo5b07e072014-09-14 12:50:06 +03002271 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2272 10);
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002273 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002274 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002275 arvif->vdev_id, ret);
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002276 return ret;
2277 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002278 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002279
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002280 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002281}
2282
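/* Clamp the peer's VHT TX MCS map to the user-configured VHT rate mask,
 * NSS by NSS. Unexpected per-NSS limits trip the WARN_ON and are treated
 * as "not supported" (see ath10k_mac_can_set_bitrate_mask()).
 */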
Michal Kazior45c9abc2015-04-21 20:42:58 +03002283static u16
2284ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2285 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2286{
2287 int idx_limit;
2288 int nss;
2289 u16 mcs_map;
2290 u16 mcs;
2291
2292 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2293 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2294 vht_mcs_limit[nss];
2295
2296 if (mcs_map)
2297 idx_limit = fls(mcs_map) - 1;
2298 else
2299 idx_limit = -1;
2300
2301 switch (idx_limit) {
2302 case 0: /* fall through */
2303 case 1: /* fall through */
2304 case 2: /* fall through */
2305 case 3: /* fall through */
2306 case 4: /* fall through */
2307 case 5: /* fall through */
2308 case 6: /* fall through */
2309 default:
2310 /* see ath10k_mac_can_set_bitrate_mask() */
2311 WARN_ON(1);
2312 /* fall through */
2313 case -1:
2314 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2315 break;
2316 case 7:
2317 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2318 break;
2319 case 8:
2320 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2321 break;
2322 case 9:
2323 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2324 break;
2325 }
2326
2327 tx_mcs_set &= ~(0x3 << (nss * 2));
2328 tx_mcs_set |= mcs << (nss * 2);
2329 }
2330
2331 return tx_mcs_set;
2332}
2333
Kalle Valo5e3dd152013-06-12 20:52:10 +03002334static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
Michal Kazior500ff9f2015-03-31 10:26:21 +00002335 struct ieee80211_vif *vif,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002336 struct ieee80211_sta *sta,
2337 struct wmi_peer_assoc_complete_arg *arg)
2338{
2339 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
Michal Kazior45c9abc2015-04-21 20:42:58 +03002340 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior500ff9f2015-03-31 10:26:21 +00002341 struct cfg80211_chan_def def;
Michal Kazior45c9abc2015-04-21 20:42:58 +03002342 enum ieee80211_band band;
2343 const u16 *vht_mcs_mask;
Sujith Manoharana24b88b2013-10-07 19:51:57 -07002344 u8 ampdu_factor;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002345
Michal Kazior500ff9f2015-03-31 10:26:21 +00002346 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2347 return;
2348
Kalle Valo5e3dd152013-06-12 20:52:10 +03002349 if (!vht_cap->vht_supported)
2350 return;
2351
Michal Kazior45c9abc2015-04-21 20:42:58 +03002352 band = def.chan->band;
2353 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2354
2355 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2356 return;
2357
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002358 arg->peer_flags |= ar->wmi.peer_flags->vht;
Yanbo Lid68bb122015-01-23 08:18:20 +08002359
Michal Kazior500ff9f2015-03-31 10:26:21 +00002360 if (def.chan->band == IEEE80211_BAND_2GHZ)
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002361 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
Yanbo Lid68bb122015-01-23 08:18:20 +08002362
Kalle Valo5e3dd152013-06-12 20:52:10 +03002363 arg->peer_vht_caps = vht_cap->cap;
2364
Sujith Manoharana24b88b2013-10-07 19:51:57 -07002365 ampdu_factor = (vht_cap->cap &
2366 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2367 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2368
2369 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2370 * zero in VHT IE. Using it would result in degraded throughput.
2371 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2372 * it if VHT max_mpdu is smaller. */
2373 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2374 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2375 ampdu_factor)) - 1);
2376
Kalle Valo5e3dd152013-06-12 20:52:10 +03002377 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002378 arg->peer_flags |= ar->wmi.peer_flags->bw80;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002379
2380 arg->peer_vht_rates.rx_max_rate =
2381 __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2382 arg->peer_vht_rates.rx_mcs_set =
2383 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2384 arg->peer_vht_rates.tx_max_rate =
2385 __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
Michal Kazior45c9abc2015-04-21 20:42:58 +03002386 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2387 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002388
Michal Kazior7aa7a722014-08-25 12:09:38 +02002389 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03002390 sta->addr, arg->peer_max_mpdu, arg->peer_flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002391}
2392
2393static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
Michal Kazior590922a2014-10-21 10:10:29 +03002394 struct ieee80211_vif *vif,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002395 struct ieee80211_sta *sta,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002396 struct wmi_peer_assoc_complete_arg *arg)
2397{
Michal Kazior590922a2014-10-21 10:10:29 +03002398 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2399
Kalle Valo5e3dd152013-06-12 20:52:10 +03002400 switch (arvif->vdev_type) {
2401 case WMI_VDEV_TYPE_AP:
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002402 if (sta->wme)
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002403 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002404
2405 if (sta->wme && sta->uapsd_queues) {
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002406 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
Janusz Dziedzicd3d3ff42014-01-21 07:06:53 +01002407 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2408 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002409 break;
2410 case WMI_VDEV_TYPE_STA:
Michal Kazior590922a2014-10-21 10:10:29 +03002411 if (vif->bss_conf.qos)
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002412 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002413 break;
Janusz Dziedzic627d9842014-12-17 12:29:54 +02002414 case WMI_VDEV_TYPE_IBSS:
2415 if (sta->wme)
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002416 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
Janusz Dziedzic627d9842014-12-17 12:29:54 +02002417 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002418 default:
2419 break;
2420 }
Janusz Dziedzic627d9842014-12-17 12:29:54 +02002421
2422 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
Tamizh chelvam3fab30f2015-10-29 14:27:37 +02002423 sta->addr, !!(arg->peer_flags &
2424 arvif->ar->wmi.peer_flags->qos));
Kalle Valo5e3dd152013-06-12 20:52:10 +03002425}
2426
Michal Kazior8d7aa6b2015-03-30 09:51:57 +03002427static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
Michal Kazior91b12082014-12-12 12:41:35 +01002428{
Michal Kazior8d7aa6b2015-03-30 09:51:57 +03002429 return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
2430 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
Michal Kazior91b12082014-12-12 12:41:35 +01002431}
2432
Kalle Valo5e3dd152013-06-12 20:52:10 +03002433static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
Michal Kazior590922a2014-10-21 10:10:29 +03002434 struct ieee80211_vif *vif,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002435 struct ieee80211_sta *sta,
2436 struct wmi_peer_assoc_complete_arg *arg)
2437{
Michal Kazior45c9abc2015-04-21 20:42:58 +03002438 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior500ff9f2015-03-31 10:26:21 +00002439 struct cfg80211_chan_def def;
Michal Kazior45c9abc2015-04-21 20:42:58 +03002440 enum ieee80211_band band;
2441 const u8 *ht_mcs_mask;
2442 const u16 *vht_mcs_mask;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002443 enum wmi_phy_mode phymode = MODE_UNKNOWN;
2444
Michal Kazior500ff9f2015-03-31 10:26:21 +00002445 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2446 return;
2447
Michal Kazior45c9abc2015-04-21 20:42:58 +03002448 band = def.chan->band;
2449 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2450 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2451
2452 switch (band) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002453 case IEEE80211_BAND_2GHZ:
Michal Kazior45c9abc2015-04-21 20:42:58 +03002454 if (sta->vht_cap.vht_supported &&
2455 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
Yanbo Lid68bb122015-01-23 08:18:20 +08002456 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2457 phymode = MODE_11AC_VHT40;
2458 else
2459 phymode = MODE_11AC_VHT20;
Michal Kazior45c9abc2015-04-21 20:42:58 +03002460 } else if (sta->ht_cap.ht_supported &&
2461 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002462 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2463 phymode = MODE_11NG_HT40;
2464 else
2465 phymode = MODE_11NG_HT20;
Michal Kazior8d7aa6b2015-03-30 09:51:57 +03002466 } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002467 phymode = MODE_11G;
Michal Kazior91b12082014-12-12 12:41:35 +01002468 } else {
2469 phymode = MODE_11B;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002470 }
2471
2472 break;
2473 case IEEE80211_BAND_5GHZ:
Sujith Manoharan7cc45e92013-09-08 18:19:55 +03002474 /*
2475 * Check VHT first.
2476 */
Michal Kazior45c9abc2015-04-21 20:42:58 +03002477 if (sta->vht_cap.vht_supported &&
2478 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
Sujith Manoharan7cc45e92013-09-08 18:19:55 +03002479 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2480 phymode = MODE_11AC_VHT80;
2481 else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2482 phymode = MODE_11AC_VHT40;
2483 else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2484 phymode = MODE_11AC_VHT20;
Michal Kazior45c9abc2015-04-21 20:42:58 +03002485 } else if (sta->ht_cap.ht_supported &&
2486 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2487 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002488 phymode = MODE_11NA_HT40;
2489 else
2490 phymode = MODE_11NA_HT20;
2491 } else {
2492 phymode = MODE_11A;
2493 }
2494
2495 break;
2496 default:
2497 break;
2498 }
2499
Michal Kazior7aa7a722014-08-25 12:09:38 +02002500 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
Kalle Valo38a1d472013-09-08 17:56:14 +03002501 sta->addr, ath10k_wmi_phymode_str(phymode));
Kalle Valo60c3daa2013-09-08 17:56:07 +03002502
Kalle Valo5e3dd152013-06-12 20:52:10 +03002503 arg->peer_phymode = phymode;
2504 WARN_ON(phymode == MODE_UNKNOWN);
2505}
2506
Kalle Valob9ada652013-10-16 15:44:46 +03002507static int ath10k_peer_assoc_prepare(struct ath10k *ar,
Michal Kazior590922a2014-10-21 10:10:29 +03002508 struct ieee80211_vif *vif,
Kalle Valob9ada652013-10-16 15:44:46 +03002509 struct ieee80211_sta *sta,
Kalle Valob9ada652013-10-16 15:44:46 +03002510 struct wmi_peer_assoc_complete_arg *arg)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002511{
Michal Kazior548db542013-07-05 16:15:15 +03002512 lockdep_assert_held(&ar->conf_mutex);
2513
Kalle Valob9ada652013-10-16 15:44:46 +03002514 memset(arg, 0, sizeof(*arg));
Kalle Valo5e3dd152013-06-12 20:52:10 +03002515
Michal Kazior590922a2014-10-21 10:10:29 +03002516 ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
Tamizh chelvam90eceb32015-10-29 14:27:42 +02002517 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
Michal Kazior500ff9f2015-03-31 10:26:21 +00002518 ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
Michal Kazior45c9abc2015-04-21 20:42:58 +03002519 ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
Michal Kazior500ff9f2015-03-31 10:26:21 +00002520 ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
Michal Kazior590922a2014-10-21 10:10:29 +03002521 ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2522 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002523
Kalle Valob9ada652013-10-16 15:44:46 +03002524 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002525}
2526
Michal Kazior90046f52014-02-14 14:45:51 +01002527static const u32 ath10k_smps_map[] = {
2528 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2529 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2530 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2531 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2532};
2533
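/* Translate the peer's HT SM power save capability into a WMI SMPS state
 * and push it via the peer param command.
 */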
2534static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2535 const u8 *addr,
2536 const struct ieee80211_sta_ht_cap *ht_cap)
2537{
2538 int smps;
2539
2540 if (!ht_cap->ht_supported)
2541 return 0;
2542
2543 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2544 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2545
2546 if (smps >= ARRAY_SIZE(ath10k_smps_map))
2547 return -EINVAL;
2548
2549 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2550 WMI_PEER_SMPS_STATE,
2551 ath10k_smps_map[smps]);
2552}
2553
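/* Recompute the vdev txbf parameter by matching our own VHT beamforming
 * capabilities against those advertised by the peer. Only applies when
 * the firmware expects txbf configuration after association.
 */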
Michal Kazior139e1702015-02-15 16:50:42 +02002554static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2555 struct ieee80211_vif *vif,
2556 struct ieee80211_sta_vht_cap vht_cap)
2557{
2558 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2559 int ret;
2560 u32 param;
2561 u32 value;
2562
Vivek Natarajan08e75ea2015-08-04 10:45:11 +05302563 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2564 return 0;
2565
Michal Kazior139e1702015-02-15 16:50:42 +02002566 if (!(ar->vht_cap_info &
2567 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2568 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2569 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2570 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2571 return 0;
2572
2573 param = ar->wmi.vdev_param->txbf;
2574 value = 0;
2575
2576 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2577 return 0;
2578
2579 /* The following logic is correct: if a remote STA advertises support
2580 * for acting as a beamformer then we should enable beamformee operation.
2581 */
2582
2583 if (ar->vht_cap_info &
2584 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2585 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2586 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2587 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2588
2589 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2590 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2591 }
2592
2593 if (ar->vht_cap_info &
2594 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2595 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2596 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2597 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2598
2599 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2600 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2601 }
2602
2603 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2604 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2605
2606 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2607 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2608
2609 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2610 if (ret) {
2611 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2612 value, ret);
2613 return ret;
2614 }
2615
2616 return 0;
2617}
2618
Kalle Valo5e3dd152013-06-12 20:52:10 +03002619/* can be called only in mac80211 callbacks due to `key_count` usage */
2620static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2621 struct ieee80211_vif *vif,
2622 struct ieee80211_bss_conf *bss_conf)
2623{
2624 struct ath10k *ar = hw->priv;
2625 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior90046f52014-02-14 14:45:51 +01002626 struct ieee80211_sta_ht_cap ht_cap;
Michal Kazior139e1702015-02-15 16:50:42 +02002627 struct ieee80211_sta_vht_cap vht_cap;
Kalle Valob9ada652013-10-16 15:44:46 +03002628 struct wmi_peer_assoc_complete_arg peer_arg;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002629 struct ieee80211_sta *ap_sta;
2630 int ret;
2631
Michal Kazior548db542013-07-05 16:15:15 +03002632 lockdep_assert_held(&ar->conf_mutex);
2633
Michal Kazior077efc82014-10-21 10:10:29 +03002634 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2635 arvif->vdev_id, arvif->bssid, arvif->aid);
2636
Kalle Valo5e3dd152013-06-12 20:52:10 +03002637 rcu_read_lock();
2638
2639 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2640 if (!ap_sta) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002641 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02002642 bss_conf->bssid, arvif->vdev_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002643 rcu_read_unlock();
2644 return;
2645 }
2646
Michal Kazior90046f52014-02-14 14:45:51 +01002647 /* ap_sta must be accessed only within the RCU section, which must be
2648 * left before calling ath10k_setup_peer_smps() as it might sleep. */
2649 ht_cap = ap_sta->ht_cap;
Michal Kazior139e1702015-02-15 16:50:42 +02002650 vht_cap = ap_sta->vht_cap;
Michal Kazior90046f52014-02-14 14:45:51 +01002651
Michal Kazior590922a2014-10-21 10:10:29 +03002652 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002653 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002654 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002655 bss_conf->bssid, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002656 rcu_read_unlock();
2657 return;
2658 }
2659
2660 rcu_read_unlock();
2661
Kalle Valob9ada652013-10-16 15:44:46 +03002662 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2663 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002664 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002665 bss_conf->bssid, arvif->vdev_id, ret);
Kalle Valob9ada652013-10-16 15:44:46 +03002666 return;
2667 }
2668
Michal Kazior90046f52014-02-14 14:45:51 +01002669 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2670 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002671 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002672 arvif->vdev_id, ret);
Michal Kazior90046f52014-02-14 14:45:51 +01002673 return;
2674 }
2675
Michal Kazior139e1702015-02-15 16:50:42 +02002676 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2677 if (ret) {
2678 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2679 arvif->vdev_id, bss_conf->bssid, ret);
2680 return;
2681 }
2682
Michal Kazior7aa7a722014-08-25 12:09:38 +02002683 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03002684 "mac vdev %d up (associated) bssid %pM aid %d\n",
2685 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2686
Michal Kazior077efc82014-10-21 10:10:29 +03002687 WARN_ON(arvif->is_up);
2688
Michal Kaziorc930f742014-01-23 11:38:25 +01002689 arvif->aid = bss_conf->aid;
Kalle Valob25f32c2014-09-14 12:50:49 +03002690 ether_addr_copy(arvif->bssid, bss_conf->bssid);
Michal Kaziorc930f742014-01-23 11:38:25 +01002691
2692 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2693 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002694 ath10k_warn(ar, "failed to set vdev %d up: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002695 arvif->vdev_id, ret);
Michal Kaziorc930f742014-01-23 11:38:25 +01002696 return;
2697 }
2698
2699 arvif->is_up = true;
Michal Kazior0a987fb2015-02-13 13:30:15 +01002700
2701 /* Workaround: Some firmware revisions (tested with qca6174
2702 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
2703 * poked with peer param command.
2704 */
2705 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2706 WMI_PEER_DUMMY_VAR, 1);
2707 if (ret) {
2708 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2709 arvif->bssid, arvif->vdev_id, ret);
2710 return;
2711 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002712}
2713
Kalle Valo5e3dd152013-06-12 20:52:10 +03002714static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2715 struct ieee80211_vif *vif)
2716{
2717 struct ath10k *ar = hw->priv;
2718 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior139e1702015-02-15 16:50:42 +02002719 struct ieee80211_sta_vht_cap vht_cap = {};
Kalle Valo5e3dd152013-06-12 20:52:10 +03002720 int ret;
2721
Michal Kazior548db542013-07-05 16:15:15 +03002722 lockdep_assert_held(&ar->conf_mutex);
2723
Michal Kazior077efc82014-10-21 10:10:29 +03002724 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2725 arvif->vdev_id, arvif->bssid);
Kalle Valo60c3daa2013-09-08 17:56:07 +03002726
Kalle Valo5e3dd152013-06-12 20:52:10 +03002727 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
Michal Kazior077efc82014-10-21 10:10:29 +03002728 if (ret)
2729 ath10k_warn(ar, "failed to down vdev %i: %d\n",
2730 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002731
SenthilKumar Jegadeesan627613f2015-01-29 13:50:38 +02002732 arvif->def_wep_key_idx = -1;
2733
Michal Kazior139e1702015-02-15 16:50:42 +02002734 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2735 if (ret) {
2736 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2737 arvif->vdev_id, ret);
2738 return;
2739 }
2740
Michal Kaziorc930f742014-01-23 11:38:25 +01002741 arvif->is_up = false;
Michal Kaziorcc9904e2015-03-10 16:22:01 +02002742
2743 cancel_delayed_work_sync(&arvif->connection_loss_work);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002744}
2745
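/* (Re)associate a station peer: run the WMI peer assoc command and, for a
 * fresh association, also set up SMPS, AP PS/U-APSD parameters, RTS/CTS
 * protection accounting for non-QoS stations and any cached static WEP
 * keys.
 */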
Michal Kazior590922a2014-10-21 10:10:29 +03002746static int ath10k_station_assoc(struct ath10k *ar,
2747 struct ieee80211_vif *vif,
2748 struct ieee80211_sta *sta,
2749 bool reassoc)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002750{
Michal Kazior590922a2014-10-21 10:10:29 +03002751 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Kalle Valob9ada652013-10-16 15:44:46 +03002752 struct wmi_peer_assoc_complete_arg peer_arg;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002753 int ret = 0;
2754
Michal Kazior548db542013-07-05 16:15:15 +03002755 lockdep_assert_held(&ar->conf_mutex);
2756
Michal Kazior590922a2014-10-21 10:10:29 +03002757 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002758 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002759 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02002760 sta->addr, arvif->vdev_id, ret);
Kalle Valob9ada652013-10-16 15:44:46 +03002761 return ret;
2762 }
2763
2764 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2765 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002766 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002767 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002768 return ret;
2769 }
2770
Michal Kaziorb1ecde32014-10-21 10:10:29 +03002771 /* Re-assoc is run only to update supported rates for given station. It
2772 * doesn't make much sense to reconfigure the peer completely.
2773 */
2774 if (!reassoc) {
2775 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2776 &sta->ht_cap);
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02002777 if (ret) {
Michal Kaziorb1ecde32014-10-21 10:10:29 +03002778 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02002779 arvif->vdev_id, ret);
2780 return ret;
2781 }
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02002782
Michal Kaziorb1ecde32014-10-21 10:10:29 +03002783 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2784 if (ret) {
2785 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2786 sta->addr, arvif->vdev_id, ret);
2787 return ret;
2788 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002789
Michal Kaziorb1ecde32014-10-21 10:10:29 +03002790 if (!sta->wme) {
2791 arvif->num_legacy_stations++;
2792 ret = ath10k_recalc_rtscts_prot(arvif);
2793 if (ret) {
2794 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2795 arvif->vdev_id, ret);
2796 return ret;
2797 }
2798 }
2799
SenthilKumar Jegadeesan627613f2015-01-29 13:50:38 +02002800 /* Plumb cached keys only for static WEP */
2801 if (arvif->def_wep_key_idx != -1) {
2802 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2803 if (ret) {
2804 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2805 arvif->vdev_id, ret);
2806 return ret;
2807 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002808 }
2809 }
2810
Kalle Valo5e3dd152013-06-12 20:52:10 +03002811 return ret;
2812}
2813
Michal Kazior590922a2014-10-21 10:10:29 +03002814static int ath10k_station_disassoc(struct ath10k *ar,
2815 struct ieee80211_vif *vif,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002816 struct ieee80211_sta *sta)
2817{
Michal Kazior590922a2014-10-21 10:10:29 +03002818 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002819 int ret = 0;
2820
2821 lockdep_assert_held(&ar->conf_mutex);
2822
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02002823 if (!sta->wme) {
2824 arvif->num_legacy_stations--;
2825 ret = ath10k_recalc_rtscts_prot(arvif);
2826 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002827 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02002828 arvif->vdev_id, ret);
2829 return ret;
2830 }
2831 }
2832
Kalle Valo5e3dd152013-06-12 20:52:10 +03002833 ret = ath10k_clear_peer_keys(arvif, sta->addr);
2834 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002835 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02002836 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002837 return ret;
2838 }
2839
2840 return ret;
2841}
2842
2843/**************/
2844/* Regulatory */
2845/**************/
2846
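/* Rebuild the WMI scan channel list from the wiphy band definitions,
 * skipping disabled channels and carrying over power limits, DFS and
 * no-IR flags.
 */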
2847static int ath10k_update_channel_list(struct ath10k *ar)
2848{
2849 struct ieee80211_hw *hw = ar->hw;
2850 struct ieee80211_supported_band **bands;
2851 enum ieee80211_band band;
2852 struct ieee80211_channel *channel;
2853 struct wmi_scan_chan_list_arg arg = {0};
2854 struct wmi_channel_arg *ch;
2855 bool passive;
2856 int len;
2857 int ret;
2858 int i;
2859
Michal Kazior548db542013-07-05 16:15:15 +03002860 lockdep_assert_held(&ar->conf_mutex);
2861
Kalle Valo5e3dd152013-06-12 20:52:10 +03002862 bands = hw->wiphy->bands;
2863 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
2864 if (!bands[band])
2865 continue;
2866
2867 for (i = 0; i < bands[band]->n_channels; i++) {
2868 if (bands[band]->channels[i].flags &
2869 IEEE80211_CHAN_DISABLED)
2870 continue;
2871
2872 arg.n_channels++;
2873 }
2874 }
2875
2876 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
2877 arg.channels = kzalloc(len, GFP_KERNEL);
2878 if (!arg.channels)
2879 return -ENOMEM;
2880
2881 ch = arg.channels;
2882 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
2883 if (!bands[band])
2884 continue;
2885
2886 for (i = 0; i < bands[band]->n_channels; i++) {
2887 channel = &bands[band]->channels[i];
2888
2889 if (channel->flags & IEEE80211_CHAN_DISABLED)
2890 continue;
2891
2892 ch->allow_ht = true;
2893
2894 /* FIXME: when should we really allow VHT? */
2895 ch->allow_vht = true;
2896
2897 ch->allow_ibss =
Luis R. Rodriguez8fe02e12013-10-21 19:22:25 +02002898 !(channel->flags & IEEE80211_CHAN_NO_IR);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002899
2900 ch->ht40plus =
2901 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
2902
Marek Puzyniake8a50f82013-11-20 09:59:47 +02002903 ch->chan_radar =
2904 !!(channel->flags & IEEE80211_CHAN_RADAR);
2905
Luis R. Rodriguez8fe02e12013-10-21 19:22:25 +02002906 passive = channel->flags & IEEE80211_CHAN_NO_IR;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002907 ch->passive = passive;
2908
2909 ch->freq = channel->center_freq;
Michal Kazior2d667212014-09-18 15:21:21 +02002910 ch->band_center_freq1 = channel->center_freq;
Michal Kazior89c5c842013-10-23 04:02:13 -07002911 ch->min_power = 0;
Michal Kazior02256932013-10-23 04:02:14 -07002912 ch->max_power = channel->max_power * 2;
2913 ch->max_reg_power = channel->max_reg_power * 2;
2914 ch->max_antenna_gain = channel->max_antenna_gain * 2;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002915 ch->reg_class_id = 0; /* FIXME */
2916
2917 /* FIXME: why use only legacy modes, why not any
2918 * HT/VHT modes? Would that even make any
2919 * difference? */
2920 if (channel->band == IEEE80211_BAND_2GHZ)
2921 ch->mode = MODE_11G;
2922 else
2923 ch->mode = MODE_11A;
2924
2925 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
2926 continue;
2927
Michal Kazior7aa7a722014-08-25 12:09:38 +02002928 ath10k_dbg(ar, ATH10K_DBG_WMI,
Kalle Valo60c3daa2013-09-08 17:56:07 +03002929 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
2930 ch - arg.channels, arg.n_channels,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002931 ch->freq, ch->max_power, ch->max_reg_power,
2932 ch->max_antenna_gain, ch->mode);
2933
2934 ch++;
2935 }
2936 }
2937
2938 ret = ath10k_wmi_scan_chan_list(ar, &arg);
2939 kfree(arg.channels);
2940
2941 return ret;
2942}
2943
Marek Puzyniak821af6a2014-03-21 17:46:57 +02002944static enum wmi_dfs_region
2945ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
2946{
2947 switch (dfs_region) {
2948 case NL80211_DFS_UNSET:
2949 return WMI_UNINIT_DFS_DOMAIN;
2950 case NL80211_DFS_FCC:
2951 return WMI_FCC_DFS_DOMAIN;
2952 case NL80211_DFS_ETSI:
2953 return WMI_ETSI_DFS_DOMAIN;
2954 case NL80211_DFS_JP:
2955 return WMI_MKK4_DFS_DOMAIN;
2956 }
2957 return WMI_UNINIT_DFS_DOMAIN;
2958}
2959
Michal Kaziorf7843d72013-07-16 09:38:52 +02002960static void ath10k_regd_update(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002961{
Kalle Valo5e3dd152013-06-12 20:52:10 +03002962 struct reg_dmn_pair_mapping *regpair;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002963 int ret;
Marek Puzyniak821af6a2014-03-21 17:46:57 +02002964 enum wmi_dfs_region wmi_dfs_reg;
2965 enum nl80211_dfs_regions nl_dfs_reg;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002966
Michal Kaziorf7843d72013-07-16 09:38:52 +02002967 lockdep_assert_held(&ar->conf_mutex);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002968
2969 ret = ath10k_update_channel_list(ar);
2970 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002971 ath10k_warn(ar, "failed to update channel list: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002972
2973 regpair = ar->ath_common.regulatory.regpair;
Michal Kaziorf7843d72013-07-16 09:38:52 +02002974
Marek Puzyniak821af6a2014-03-21 17:46:57 +02002975 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
2976 nl_dfs_reg = ar->dfs_detector->region;
2977 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
2978 } else {
2979 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
2980 }
2981
Kalle Valo5e3dd152013-06-12 20:52:10 +03002982 /* Target allows setting up per-band regdomain but ath_common provides
2983 * a combined one only */
2984 ret = ath10k_wmi_pdev_set_regdomain(ar,
Kalle Valoef8c0012014-02-13 18:13:12 +02002985 regpair->reg_domain,
2986 regpair->reg_domain, /* 2ghz */
2987 regpair->reg_domain, /* 5ghz */
Kalle Valo5e3dd152013-06-12 20:52:10 +03002988 regpair->reg_2ghz_ctl,
Marek Puzyniak821af6a2014-03-21 17:46:57 +02002989 regpair->reg_5ghz_ctl,
2990 wmi_dfs_reg);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002991 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002992 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
Michal Kaziorf7843d72013-07-16 09:38:52 +02002993}
Michal Kazior548db542013-07-05 16:15:15 +03002994
Michal Kaziorf7843d72013-07-16 09:38:52 +02002995static void ath10k_reg_notifier(struct wiphy *wiphy,
2996 struct regulatory_request *request)
2997{
2998 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
2999 struct ath10k *ar = hw->priv;
Janusz Dziedzic9702c682013-11-20 09:59:41 +02003000 bool result;
Michal Kaziorf7843d72013-07-16 09:38:52 +02003001
3002 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3003
Janusz Dziedzic9702c682013-11-20 09:59:41 +02003004 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003005 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
Janusz Dziedzic9702c682013-11-20 09:59:41 +02003006 request->dfs_region);
3007 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3008 request->dfs_region);
3009 if (!result)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003010 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
Janusz Dziedzic9702c682013-11-20 09:59:41 +02003011 request->dfs_region);
3012 }
3013
Michal Kaziorf7843d72013-07-16 09:38:52 +02003014 mutex_lock(&ar->conf_mutex);
3015 if (ar->state == ATH10K_STATE_ON)
3016 ath10k_regd_update(ar);
Michal Kazior548db542013-07-05 16:15:15 +03003017 mutex_unlock(&ar->conf_mutex);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003018}
3019
3020/***************/
3021/* TX handlers */
3022/***************/
3023
Michal Kaziora30c7d02016-03-06 16:14:23 +02003024enum ath10k_mac_tx_path {
3025 ATH10K_MAC_TX_HTT,
3026 ATH10K_MAC_TX_HTT_MGMT,
3027 ATH10K_MAC_TX_WMI_MGMT,
3028 ATH10K_MAC_TX_UNKNOWN,
3029};
3030
Michal Kazior96d828d2015-03-31 10:26:23 +00003031void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3032{
3033 lockdep_assert_held(&ar->htt.tx_lock);
3034
3035 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3036 ar->tx_paused |= BIT(reason);
3037 ieee80211_stop_queues(ar->hw);
3038}
3039
3040static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3041 struct ieee80211_vif *vif)
3042{
3043 struct ath10k *ar = data;
3044 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3045
3046 if (arvif->tx_paused)
3047 return;
3048
3049 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3050}
3051
3052void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3053{
3054 lockdep_assert_held(&ar->htt.tx_lock);
3055
3056 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3057 ar->tx_paused &= ~BIT(reason);
3058
3059 if (ar->tx_paused)
3060 return;
3061
3062 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3063 IEEE80211_IFACE_ITER_RESUME_ALL,
3064 ath10k_mac_tx_unlock_iter,
3065 ar);
Michal Kazior3a73d1a2015-08-06 14:46:54 +02003066
3067 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
Michal Kazior96d828d2015-03-31 10:26:23 +00003068}
3069
3070void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3071{
3072 struct ath10k *ar = arvif->ar;
3073
3074 lockdep_assert_held(&ar->htt.tx_lock);
3075
3076 WARN_ON(reason >= BITS_PER_LONG);
3077 arvif->tx_paused |= BIT(reason);
3078 ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3079}
3080
3081void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3082{
3083 struct ath10k *ar = arvif->ar;
3084
3085 lockdep_assert_held(&ar->htt.tx_lock);
3086
3087 WARN_ON(reason >= BITS_PER_LONG);
3088 arvif->tx_paused &= ~BIT(reason);
3089
3090 if (ar->tx_paused)
3091 return;
3092
3093 if (arvif->tx_paused)
3094 return;
3095
3096 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3097}
3098
Michal Kaziorb4aa5392015-03-31 10:26:24 +00003099static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3100 enum wmi_tlv_tx_pause_id pause_id,
3101 enum wmi_tlv_tx_pause_action action)
3102{
3103 struct ath10k *ar = arvif->ar;
3104
3105 lockdep_assert_held(&ar->htt.tx_lock);
3106
Michal Kazioracd0b272015-07-09 13:08:38 +02003107 switch (action) {
3108 case WMI_TLV_TX_PAUSE_ACTION_STOP:
3109 ath10k_mac_vif_tx_lock(arvif, pause_id);
Michal Kaziorb4aa5392015-03-31 10:26:24 +00003110 break;
Michal Kazioracd0b272015-07-09 13:08:38 +02003111 case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3112 ath10k_mac_vif_tx_unlock(arvif, pause_id);
3113 break;
Michal Kaziorb4aa5392015-03-31 10:26:24 +00003114 default:
Michal Kazioracd0b272015-07-09 13:08:38 +02003115 ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
3116 action, arvif->vdev_id);
Michal Kaziorb4aa5392015-03-31 10:26:24 +00003117 break;
3118 }
3119}
3120
3121struct ath10k_mac_tx_pause {
3122 u32 vdev_id;
3123 enum wmi_tlv_tx_pause_id pause_id;
3124 enum wmi_tlv_tx_pause_action action;
3125};
3126
3127static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3128 struct ieee80211_vif *vif)
3129{
3130 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3131 struct ath10k_mac_tx_pause *arg = data;
3132
Michal Kazioracd0b272015-07-09 13:08:38 +02003133 if (arvif->vdev_id != arg->vdev_id)
3134 return;
3135
Michal Kaziorb4aa5392015-03-31 10:26:24 +00003136 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3137}
3138
Michal Kazioracd0b272015-07-09 13:08:38 +02003139void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3140 enum wmi_tlv_tx_pause_id pause_id,
3141 enum wmi_tlv_tx_pause_action action)
Michal Kaziorb4aa5392015-03-31 10:26:24 +00003142{
3143 struct ath10k_mac_tx_pause arg = {
3144 .vdev_id = vdev_id,
3145 .pause_id = pause_id,
3146 .action = action,
3147 };
3148
3149 spin_lock_bh(&ar->htt.tx_lock);
3150 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3151 IEEE80211_IFACE_ITER_RESUME_ALL,
3152 ath10k_mac_handle_tx_pause_iter,
3153 &arg);
3154 spin_unlock_bh(&ar->htt.tx_lock);
3155}
3156
Michal Kaziord740d8f2015-03-30 09:51:51 +03003157static enum ath10k_hw_txrx_mode
Michal Kazior6a2636d2015-11-18 06:59:16 +01003158ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3159 struct ieee80211_vif *vif,
3160 struct ieee80211_sta *sta,
3161 struct sk_buff *skb)
Michal Kaziord740d8f2015-03-30 09:51:51 +03003162{
3163 const struct ieee80211_hdr *hdr = (void *)skb->data;
3164 __le16 fc = hdr->frame_control;
3165
3166 if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3167 return ATH10K_HW_TXRX_RAW;
3168
3169 if (ieee80211_is_mgmt(fc))
3170 return ATH10K_HW_TXRX_MGMT;
3171
3172 /* Workaround:
3173 *
3174	 * NullFunc frames are mostly used to ping whether a client or AP is still
3175	 * reachable and responsive. This implies tx status reports must be
3176	 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
3177	 * conclude that the other end has disappeared and tear down the BSS
3178	 * connection, or it may never disconnect from the BSS/client (which is
3179	 * the case).
3180 *
3181 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3182 * NullFunc frames to driver. However there's a HTT Mgmt Tx command
3183 * which seems to deliver correct tx reports for NullFunc frames. The
3184 * downside of using it is it ignores client powersave state so it can
3185	 * end up disconnecting sleeping clients in AP mode. It should fix STA
3186	 * mode though because APs don't sleep.
3187 */
3188 if (ar->htt.target_version_major < 3 &&
3189 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3190 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
3191 return ATH10K_HW_TXRX_MGMT;
3192
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03003193 /* Workaround:
3194 *
3195 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3196	 * NativeWifi txmode - it selects the AP key instead of the peer key. It seems
3197 * to work with Ethernet txmode so use it.
David Liuccec9032015-07-24 20:25:32 +03003198 *
3199 * FIXME: Check if raw mode works with TDLS.
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03003200 */
3201 if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3202 return ATH10K_HW_TXRX_ETHERNET;
3203
David Liuccec9032015-07-24 20:25:32 +03003204 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3205 return ATH10K_HW_TXRX_RAW;
3206
Michal Kaziord740d8f2015-03-30 09:51:51 +03003207 return ATH10K_HW_TXRX_NATIVE_WIFI;
3208}
3209
David Liuccec9032015-07-24 20:25:32 +03003210static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
Michal Kaziorfd12cb32015-11-18 06:59:15 +01003211 struct sk_buff *skb)
3212{
3213 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3214 const struct ieee80211_hdr *hdr = (void *)skb->data;
David Liuccec9032015-07-24 20:25:32 +03003215 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3216 IEEE80211_TX_CTL_INJECTED;
Michal Kaziorfd12cb32015-11-18 06:59:15 +01003217
3218 if (!ieee80211_has_protected(hdr->frame_control))
3219 return false;
3220
David Liuccec9032015-07-24 20:25:32 +03003221 if ((info->flags & mask) == mask)
3222 return false;
Michal Kaziorfd12cb32015-11-18 06:59:15 +01003223
David Liuccec9032015-07-24 20:25:32 +03003224 if (vif)
3225 return !ath10k_vif_to_arvif(vif)->nohwcrypt;
Michal Kaziorfd12cb32015-11-18 06:59:15 +01003226
David Liuccec9032015-07-24 20:25:32 +03003227 return true;
3228}
3229
Michal Kazior4b604552014-07-21 21:03:09 +03003230/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3231 * Control in the header.
Kalle Valo5e3dd152013-06-12 20:52:10 +03003232 */
Michal Kazior4b604552014-07-21 21:03:09 +03003233static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003234{
3235 struct ieee80211_hdr *hdr = (void *)skb->data;
Michal Kaziorc21c64d2014-07-21 21:03:10 +03003236 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003237 u8 *qos_ctl;
3238
3239 if (!ieee80211_is_data_qos(hdr->frame_control))
3240 return;
3241
3242 qos_ctl = ieee80211_get_qos_ctl(hdr);
Michal Kaziorba0ccd72013-07-22 14:25:28 +02003243 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3244 skb->data, (void *)qos_ctl - (void *)skb->data);
3245 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
Michal Kaziorc21c64d2014-07-21 21:03:10 +03003246
Michal Kazior8bad8dc2015-03-11 14:25:26 +01003247 /* Some firmware revisions don't handle sending QoS NullFunc well.
3248 * These frames are mainly used for CQM purposes so it doesn't really
3249	 * matter whether a QoS NullFunc or a plain NullFunc is sent.
Michal Kaziorc21c64d2014-07-21 21:03:10 +03003250 */
Michal Kaziorbf0a26d2015-01-24 12:14:51 +02003251 hdr = (void *)skb->data;
Michal Kazior8bad8dc2015-03-11 14:25:26 +01003252 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
Michal Kazior609db222015-11-18 06:59:22 +01003253 cb->flags &= ~ATH10K_SKB_F_QOS;
Michal Kazior8bad8dc2015-03-11 14:25:26 +01003254
3255 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003256}
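/* Illustrative sketch (not part of the original file): the rewrite done by
 * ath10k_tx_h_nwifi() above is effectively
 *
 *   [hdr before QoS Control][QoS Control (2 bytes)][rest of frame]
 *     -> [hdr before QoS Control][rest of frame]
 *
 * i.e. the leading header bytes are shifted forward by IEEE80211_QOS_CTL_LEN
 * and the skb is pulled by the same amount, leaving a header without a QoS
 * Control field, as Native Wifi mode expects.
 */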
3257
Michal Kaziord740d8f2015-03-30 09:51:51 +03003258static void ath10k_tx_h_8023(struct sk_buff *skb)
3259{
3260 struct ieee80211_hdr *hdr;
3261 struct rfc1042_hdr *rfc1042;
3262 struct ethhdr *eth;
3263 size_t hdrlen;
3264 u8 da[ETH_ALEN];
3265 u8 sa[ETH_ALEN];
3266 __be16 type;
3267
3268 hdr = (void *)skb->data;
3269 hdrlen = ieee80211_hdrlen(hdr->frame_control);
3270 rfc1042 = (void *)skb->data + hdrlen;
3271
3272 ether_addr_copy(da, ieee80211_get_DA(hdr));
3273 ether_addr_copy(sa, ieee80211_get_SA(hdr));
3274 type = rfc1042->snap_type;
3275
3276 skb_pull(skb, hdrlen + sizeof(*rfc1042));
3277 skb_push(skb, sizeof(*eth));
3278
3279 eth = (void *)skb->data;
3280 ether_addr_copy(eth->h_dest, da);
3281 ether_addr_copy(eth->h_source, sa);
3282 eth->h_proto = type;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003283}
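/* Illustrative sketch (not part of the original file): ath10k_tx_h_8023()
 * above rewrites a Native Wifi frame carrying an LLC/SNAP (RFC 1042) header
 * into a plain Ethernet II frame:
 *
 *   [802.11 hdr][AA AA 03 00 00 00][ethertype][payload]
 *     -> [DA (6)][SA (6)][ethertype (2)][payload]
 *
 * with DA/SA taken from the 802.11 header and the ethertype taken from the
 * SNAP header.
 */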
3284
Michal Kazior4b604552014-07-21 21:03:09 +03003285static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3286 struct ieee80211_vif *vif,
3287 struct sk_buff *skb)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003288{
3289 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003290 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3291
3292	/* This is the case only for P2P_GO */
Peter Oh08c27be2016-01-28 13:54:09 -08003293 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003294 return;
3295
3296 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3297 spin_lock_bh(&ar->data_lock);
3298 if (arvif->u.ap.noa_data)
3299 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3300 GFP_ATOMIC))
3301 memcpy(skb_put(skb, arvif->u.ap.noa_len),
3302 arvif->u.ap.noa_data,
3303 arvif->u.ap.noa_len);
3304 spin_unlock_bh(&ar->data_lock);
3305 }
3306}
3307
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003308static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3309 struct ieee80211_vif *vif,
Michal Kaziordd4717b2016-03-06 16:14:39 +02003310 struct ieee80211_txq *txq,
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003311 struct sk_buff *skb)
3312{
3313 struct ieee80211_hdr *hdr = (void *)skb->data;
3314 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3315
3316 cb->flags = 0;
3317 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3318 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3319
3320 if (ieee80211_is_mgmt(hdr->frame_control))
3321 cb->flags |= ATH10K_SKB_F_MGMT;
3322
3323 if (ieee80211_is_data_qos(hdr->frame_control))
3324 cb->flags |= ATH10K_SKB_F_QOS;
3325
3326 cb->vif = vif;
Michal Kaziordd4717b2016-03-06 16:14:39 +02003327 cb->txq = txq;
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003328}
3329
Vasanthakumar Thiagarajand39de992015-11-05 11:34:00 +05303330bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
Michal Kazior8d6d3622014-11-24 14:58:31 +01003331{
3332 /* FIXME: Not really sure since when the behaviour changed. At some
3333 * point new firmware stopped requiring creation of peer entries for
3334 * offchannel tx (and actually creating them causes issues with wmi-htc
3335 * tx credit replenishment and reliability). Assuming it's at least 3.4
3336	 * because that's when the `freq` field was introduced to the TX_FRM HTT command.
3337 */
Vasanthakumar Thiagarajan8921f5f2015-11-05 11:33:59 +05303338 return (ar->htt.target_version_major >= 3 &&
Vasanthakumar Thiagarajand39de992015-11-05 11:34:00 +05303339 ar->htt.target_version_minor >= 4 &&
3340 ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
Michal Kazior8d6d3622014-11-24 14:58:31 +01003341}
3342
Michal Kaziord740d8f2015-03-30 09:51:51 +03003343static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003344{
Michal Kaziord740d8f2015-03-30 09:51:51 +03003345 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
Bartosz Markowski5e00d312013-09-26 17:47:12 +02003346 int ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003347
Michal Kaziord740d8f2015-03-30 09:51:51 +03003348 spin_lock_bh(&ar->data_lock);
3349
3350 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3351 ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3352 ret = -ENOSPC;
3353 goto unlock;
Michal Kazior961d4c32013-08-09 10:13:34 +02003354 }
3355
Michal Kaziord740d8f2015-03-30 09:51:51 +03003356 __skb_queue_tail(q, skb);
3357 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3358
3359unlock:
3360 spin_unlock_bh(&ar->data_lock);
3361
3362 return ret;
3363}
3364
Michal Kaziora30c7d02016-03-06 16:14:23 +02003365static enum ath10k_mac_tx_path
3366ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3367 struct sk_buff *skb,
3368 enum ath10k_hw_txrx_mode txmode)
3369{
3370 switch (txmode) {
3371 case ATH10K_HW_TXRX_RAW:
3372 case ATH10K_HW_TXRX_NATIVE_WIFI:
3373 case ATH10K_HW_TXRX_ETHERNET:
3374 return ATH10K_MAC_TX_HTT;
3375 case ATH10K_HW_TXRX_MGMT:
3376 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3377 ar->fw_features))
3378 return ATH10K_MAC_TX_WMI_MGMT;
3379 else if (ar->htt.target_version_major >= 3)
3380 return ATH10K_MAC_TX_HTT;
3381 else
3382 return ATH10K_MAC_TX_HTT_MGMT;
3383 }
3384
3385 return ATH10K_MAC_TX_UNKNOWN;
3386}
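/* Descriptive summary of the mapping implemented above (not part of the
 * original file):
 *
 *   RAW / NATIVE_WIFI / ETHERNET                  -> ATH10K_MAC_TX_HTT
 *   MGMT + WMI mgmt tx firmware feature           -> ATH10K_MAC_TX_WMI_MGMT
 *   MGMT + HTT target version >= 3                -> ATH10K_MAC_TX_HTT
 *   MGMT otherwise                                -> ATH10K_MAC_TX_HTT_MGMT
 */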
3387
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003388static int ath10k_mac_tx_submit(struct ath10k *ar,
3389 enum ath10k_hw_txrx_mode txmode,
Michal Kazior6421969f2016-03-06 16:14:25 +02003390 enum ath10k_mac_tx_path txpath,
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003391 struct sk_buff *skb)
Michal Kaziord740d8f2015-03-30 09:51:51 +03003392{
Michal Kaziord740d8f2015-03-30 09:51:51 +03003393 struct ath10k_htt *htt = &ar->htt;
Michal Kazior6421969f2016-03-06 16:14:25 +02003394 int ret = -EINVAL;
Michal Kaziora30c7d02016-03-06 16:14:23 +02003395
3396 switch (txpath) {
3397 case ATH10K_MAC_TX_HTT:
Michal Kazior8a933962015-11-18 06:59:17 +01003398 ret = ath10k_htt_tx(htt, txmode, skb);
Michal Kaziord740d8f2015-03-30 09:51:51 +03003399 break;
Michal Kaziora30c7d02016-03-06 16:14:23 +02003400 case ATH10K_MAC_TX_HTT_MGMT:
3401 ret = ath10k_htt_mgmt_tx(htt, skb);
3402 break;
3403 case ATH10K_MAC_TX_WMI_MGMT:
3404 ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3405 break;
3406 case ATH10K_MAC_TX_UNKNOWN:
3407 WARN_ON_ONCE(1);
3408 ret = -EINVAL;
Michal Kaziord740d8f2015-03-30 09:51:51 +03003409 break;
Bartosz Markowski5e00d312013-09-26 17:47:12 +02003410 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03003411
3412 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003413 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3414 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003415 ieee80211_free_txskb(ar->hw, skb);
3416 }
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003417
3418 return ret;
3419}
3420
3421/* This function consumes the sk_buff regardless of return value as far as
3422 * the caller is concerned, so no freeing is necessary afterwards.
3423 */
3424static int ath10k_mac_tx(struct ath10k *ar,
3425 struct ieee80211_vif *vif,
3426 struct ieee80211_sta *sta,
3427 enum ath10k_hw_txrx_mode txmode,
Michal Kazior6421969f2016-03-06 16:14:25 +02003428 enum ath10k_mac_tx_path txpath,
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003429 struct sk_buff *skb)
3430{
3431 struct ieee80211_hw *hw = ar->hw;
3432 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3433 int ret;
3434
3435 /* We should disable CCK RATE due to P2P */
3436 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3437 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3438
3439 switch (txmode) {
3440 case ATH10K_HW_TXRX_MGMT:
3441 case ATH10K_HW_TXRX_NATIVE_WIFI:
3442 ath10k_tx_h_nwifi(hw, skb);
3443 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3444 ath10k_tx_h_seq_no(vif, skb);
3445 break;
3446 case ATH10K_HW_TXRX_ETHERNET:
3447 ath10k_tx_h_8023(skb);
3448 break;
3449 case ATH10K_HW_TXRX_RAW:
3450 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3451 WARN_ON_ONCE(1);
3452 ieee80211_free_txskb(hw, skb);
3453 return -ENOTSUPP;
3454 }
3455 }
3456
3457 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3458 if (!ath10k_mac_tx_frm_has_freq(ar)) {
3459 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
3460 skb);
3461
3462 skb_queue_tail(&ar->offchan_tx_queue, skb);
3463 ieee80211_queue_work(hw, &ar->offchan_tx_work);
3464 return 0;
3465 }
3466 }
3467
Michal Kazior6421969f2016-03-06 16:14:25 +02003468 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003469 if (ret) {
3470 ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3471 return ret;
3472 }
3473
3474 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003475}
3476
3477void ath10k_offchan_tx_purge(struct ath10k *ar)
3478{
3479 struct sk_buff *skb;
3480
3481 for (;;) {
3482 skb = skb_dequeue(&ar->offchan_tx_queue);
3483 if (!skb)
3484 break;
3485
3486 ieee80211_free_txskb(ar->hw, skb);
3487 }
3488}
3489
3490void ath10k_offchan_tx_work(struct work_struct *work)
3491{
3492 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3493 struct ath10k_peer *peer;
Michal Kazior8a933962015-11-18 06:59:17 +01003494 struct ath10k_vif *arvif;
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003495 enum ath10k_hw_txrx_mode txmode;
Michal Kazior6421969f2016-03-06 16:14:25 +02003496 enum ath10k_mac_tx_path txpath;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003497 struct ieee80211_hdr *hdr;
Michal Kazior8a933962015-11-18 06:59:17 +01003498 struct ieee80211_vif *vif;
3499 struct ieee80211_sta *sta;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003500 struct sk_buff *skb;
3501 const u8 *peer_addr;
3502 int vdev_id;
3503 int ret;
Nicholas Mc Guire8e9904f52015-03-30 15:39:19 +03003504 unsigned long time_left;
Michal Kazioradaeed72015-08-05 12:15:23 +02003505 bool tmp_peer_created = false;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003506
3507 /* FW requirement: We must create a peer before FW will send out
3508 * an offchannel frame. Otherwise the frame will be stuck and
3509 * never transmitted. We delete the peer upon tx completion.
3510 * It is unlikely that a peer for offchannel tx will already be
3511	 * present. However it may exist in some rare cases, so account for that.
3512 * Otherwise we might remove a legitimate peer and break stuff. */
3513
3514 for (;;) {
3515 skb = skb_dequeue(&ar->offchan_tx_queue);
3516 if (!skb)
3517 break;
3518
3519 mutex_lock(&ar->conf_mutex);
3520
Michal Kazior7aa7a722014-08-25 12:09:38 +02003521 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03003522 skb);
3523
3524 hdr = (struct ieee80211_hdr *)skb->data;
3525 peer_addr = ieee80211_get_DA(hdr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003526
3527 spin_lock_bh(&ar->data_lock);
Michal Kazior609db222015-11-18 06:59:22 +01003528 vdev_id = ar->scan.vdev_id;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003529 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3530 spin_unlock_bh(&ar->data_lock);
3531
3532 if (peer)
Kalle Valo60c3daa2013-09-08 17:56:07 +03003533 /* FIXME: should this use ath10k_warn()? */
Michal Kazior7aa7a722014-08-25 12:09:38 +02003534 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03003535 peer_addr, vdev_id);
3536
3537 if (!peer) {
Michal Kazior69427262016-03-06 16:14:30 +02003538 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3539 peer_addr,
Marek Puzyniak7390ed32015-03-30 09:51:52 +03003540 WMI_PEER_TYPE_DEFAULT);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003541 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003542 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03003543 peer_addr, vdev_id, ret);
Michal Kazioradaeed72015-08-05 12:15:23 +02003544 tmp_peer_created = (ret == 0);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003545 }
3546
3547 spin_lock_bh(&ar->data_lock);
Wolfram Sang16735d02013-11-14 14:32:02 -08003548 reinit_completion(&ar->offchan_tx_completed);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003549 ar->offchan_tx_skb = skb;
3550 spin_unlock_bh(&ar->data_lock);
3551
Michal Kazior8a933962015-11-18 06:59:17 +01003552 /* It's safe to access vif and sta - conf_mutex guarantees that
3553 * sta_state() and remove_interface() are locked exclusively
3554		 * out wrt this offchannel worker.
3555 */
3556 arvif = ath10k_get_arvif(ar, vdev_id);
3557 if (arvif) {
3558 vif = arvif->vif;
3559 sta = ieee80211_find_sta(vif, peer_addr);
3560 } else {
3561 vif = NULL;
3562 sta = NULL;
3563 }
3564
3565 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
Michal Kazior6421969f2016-03-06 16:14:25 +02003566 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
Michal Kazior8a933962015-11-18 06:59:17 +01003567
Michal Kazior6421969f2016-03-06 16:14:25 +02003568 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003569 if (ret) {
3570 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3571 ret);
3572 /* not serious */
3573 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03003574
Nicholas Mc Guire8e9904f52015-03-30 15:39:19 +03003575 time_left =
3576 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3577 if (time_left == 0)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003578 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03003579 skb);
3580
Michal Kazioradaeed72015-08-05 12:15:23 +02003581 if (!peer && tmp_peer_created) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03003582 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3583 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003584 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03003585 peer_addr, vdev_id, ret);
3586 }
3587
3588 mutex_unlock(&ar->conf_mutex);
3589 }
3590}
3591
Bartosz Markowski5e00d312013-09-26 17:47:12 +02003592void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3593{
3594 struct sk_buff *skb;
3595
3596 for (;;) {
3597 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3598 if (!skb)
3599 break;
3600
3601 ieee80211_free_txskb(ar->hw, skb);
3602 }
3603}
3604
3605void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3606{
3607 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3608 struct sk_buff *skb;
3609 int ret;
3610
3611 for (;;) {
3612 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3613 if (!skb)
3614 break;
3615
3616 ret = ath10k_wmi_mgmt_tx(ar, skb);
Michal Kazior5fb5e412013-10-28 07:18:13 +01003617 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003618 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02003619 ret);
Michal Kazior5fb5e412013-10-28 07:18:13 +01003620 ieee80211_free_txskb(ar->hw, skb);
3621 }
Bartosz Markowski5e00d312013-09-26 17:47:12 +02003622 }
3623}
3624
Michal Kazior29946872016-03-06 16:14:34 +02003625static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3626{
3627 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3628
3629 if (!txq)
3630 return;
3631
3632 INIT_LIST_HEAD(&artxq->list);
3633}
3634
3635static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3636{
3637 struct ath10k_txq *artxq = (void *)txq->drv_priv;
Michal Kaziordd4717b2016-03-06 16:14:39 +02003638 struct ath10k_skb_cb *cb;
3639 struct sk_buff *msdu;
3640 int msdu_id;
Michal Kazior29946872016-03-06 16:14:34 +02003641
3642 if (!txq)
3643 return;
3644
3645 spin_lock_bh(&ar->txqs_lock);
3646 if (!list_empty(&artxq->list))
3647 list_del_init(&artxq->list);
3648 spin_unlock_bh(&ar->txqs_lock);
Michal Kaziordd4717b2016-03-06 16:14:39 +02003649
3650 spin_lock_bh(&ar->htt.tx_lock);
3651 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3652 cb = ATH10K_SKB_CB(msdu);
3653 if (cb->txq == txq)
3654 cb->txq = NULL;
3655 }
3656 spin_unlock_bh(&ar->htt.tx_lock);
Michal Kazior29946872016-03-06 16:14:34 +02003657}
3658
Michal Kazior426e10e2016-03-06 16:14:43 +02003659struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3660 u16 peer_id,
3661 u8 tid)
3662{
3663 struct ath10k_peer *peer;
3664
3665 lockdep_assert_held(&ar->data_lock);
3666
3667 peer = ar->peer_map[peer_id];
3668 if (!peer)
3669 return NULL;
3670
3671 if (peer->sta)
3672 return peer->sta->txq[tid];
3673 else if (peer->vif)
3674 return peer->vif->txq;
3675 else
3676 return NULL;
3677}
3678
Michal Kazior29946872016-03-06 16:14:34 +02003679static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3680 struct ieee80211_txq *txq)
3681{
Michal Kazior426e10e2016-03-06 16:14:43 +02003682 struct ath10k *ar = hw->priv;
3683 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3684
3685 /* No need to get locks */
3686
3687 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3688 return true;
3689
3690 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3691 return true;
3692
3693 if (artxq->num_fw_queued < artxq->num_push_allowed)
3694 return true;
3695
3696 return false;
Michal Kazior29946872016-03-06 16:14:34 +02003697}
3698
Michal Kazior426e10e2016-03-06 16:14:43 +02003699int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3700 struct ieee80211_txq *txq)
Michal Kazior29946872016-03-06 16:14:34 +02003701{
3702 const bool is_mgmt = false;
3703 const bool is_presp = false;
3704 struct ath10k *ar = hw->priv;
3705 struct ath10k_htt *htt = &ar->htt;
Michal Kazior3cc0fef2016-03-06 16:14:41 +02003706 struct ath10k_txq *artxq = (void *)txq->drv_priv;
Michal Kazior29946872016-03-06 16:14:34 +02003707 struct ieee80211_vif *vif = txq->vif;
3708 struct ieee80211_sta *sta = txq->sta;
3709 enum ath10k_hw_txrx_mode txmode;
3710 enum ath10k_mac_tx_path txpath;
3711 struct sk_buff *skb;
Michal Kazior426e10e2016-03-06 16:14:43 +02003712 size_t skb_len;
Michal Kazior29946872016-03-06 16:14:34 +02003713 int ret;
3714
3715 spin_lock_bh(&ar->htt.tx_lock);
3716 ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp);
3717 spin_unlock_bh(&ar->htt.tx_lock);
3718
3719 if (ret)
3720 return ret;
3721
3722 skb = ieee80211_tx_dequeue(hw, txq);
3723 if (!skb) {
3724 spin_lock_bh(&ar->htt.tx_lock);
3725 ath10k_htt_tx_dec_pending(htt, is_mgmt);
3726 spin_unlock_bh(&ar->htt.tx_lock);
3727
3728 return -ENOENT;
3729 }
3730
Michal Kaziordd4717b2016-03-06 16:14:39 +02003731 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
Michal Kazior29946872016-03-06 16:14:34 +02003732
Michal Kazior426e10e2016-03-06 16:14:43 +02003733 skb_len = skb->len;
Michal Kazior29946872016-03-06 16:14:34 +02003734 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3735 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3736
3737 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3738 if (unlikely(ret)) {
3739 ath10k_warn(ar, "failed to push frame: %d\n", ret);
3740
3741 spin_lock_bh(&ar->htt.tx_lock);
3742 ath10k_htt_tx_dec_pending(htt, is_mgmt);
3743 spin_unlock_bh(&ar->htt.tx_lock);
3744
3745 return ret;
3746 }
3747
Michal Kazior3cc0fef2016-03-06 16:14:41 +02003748 spin_lock_bh(&ar->htt.tx_lock);
3749 artxq->num_fw_queued++;
3750 spin_unlock_bh(&ar->htt.tx_lock);
3751
Michal Kazior426e10e2016-03-06 16:14:43 +02003752 return skb_len;
Michal Kazior29946872016-03-06 16:14:34 +02003753}
3754
3755void ath10k_mac_tx_push_pending(struct ath10k *ar)
3756{
3757 struct ieee80211_hw *hw = ar->hw;
3758 struct ieee80211_txq *txq;
3759 struct ath10k_txq *artxq;
3760 struct ath10k_txq *last;
3761 int ret;
3762 int max;
3763
3764 spin_lock_bh(&ar->txqs_lock);
3765 rcu_read_lock();
3766
3767 last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3768 while (!list_empty(&ar->txqs)) {
3769 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3770 txq = container_of((void *)artxq, struct ieee80211_txq,
3771 drv_priv);
3772
3773		/* Prevent an aggressive sta/tid from taking over the tx queue */
3774 max = 16;
3775 while (max--) {
3776 ret = ath10k_mac_tx_push_txq(hw, txq);
3777 if (ret < 0)
3778 break;
3779 }
3780
3781 list_del_init(&artxq->list);
Michal Kaziorc1a43d92016-03-06 16:14:36 +02003782 ath10k_htt_tx_txq_update(hw, txq);
Michal Kazior29946872016-03-06 16:14:34 +02003783
3784 if (artxq == last || (ret < 0 && ret != -ENOENT)) {
3785 if (ret != -ENOENT)
3786 list_add_tail(&artxq->list, &ar->txqs);
3787 break;
3788 }
3789 }
3790
3791 rcu_read_unlock();
3792 spin_unlock_bh(&ar->txqs_lock);
3793}
3794
Kalle Valo5e3dd152013-06-12 20:52:10 +03003795/************/
3796/* Scanning */
3797/************/
3798
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003799void __ath10k_scan_finish(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003800{
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003801 lockdep_assert_held(&ar->data_lock);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003802
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003803 switch (ar->scan.state) {
3804 case ATH10K_SCAN_IDLE:
3805 break;
3806 case ATH10K_SCAN_RUNNING:
Michal Kazior7305d3e2014-11-24 14:58:33 +01003807 case ATH10K_SCAN_ABORTING:
3808 if (!ar->scan.is_roc)
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003809 ieee80211_scan_completed(ar->hw,
3810 (ar->scan.state ==
3811 ATH10K_SCAN_ABORTING));
Michal Kaziord710e752015-07-09 13:08:36 +02003812 else if (ar->scan.roc_notify)
3813 ieee80211_remain_on_channel_expired(ar->hw);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003814 /* fall through */
3815 case ATH10K_SCAN_STARTING:
3816 ar->scan.state = ATH10K_SCAN_IDLE;
3817 ar->scan_channel = NULL;
Michal Kaziorbd877442015-11-18 06:59:19 +01003818 ar->scan.roc_freq = 0;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003819 ath10k_offchan_tx_purge(ar);
3820 cancel_delayed_work(&ar->scan.timeout);
3821 complete_all(&ar->scan.completed);
3822 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003823 }
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003824}
Kalle Valo5e3dd152013-06-12 20:52:10 +03003825
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003826void ath10k_scan_finish(struct ath10k *ar)
3827{
3828 spin_lock_bh(&ar->data_lock);
3829 __ath10k_scan_finish(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003830 spin_unlock_bh(&ar->data_lock);
3831}
3832
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003833static int ath10k_scan_stop(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003834{
3835 struct wmi_stop_scan_arg arg = {
3836 .req_id = 1, /* FIXME */
3837 .req_type = WMI_SCAN_STOP_ONE,
3838 .u.scan_id = ATH10K_SCAN_ID,
3839 };
3840 int ret;
3841
3842 lockdep_assert_held(&ar->conf_mutex);
3843
Kalle Valo5e3dd152013-06-12 20:52:10 +03003844 ret = ath10k_wmi_stop_scan(ar, &arg);
3845 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003846 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003847 goto out;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003848 }
3849
Kalle Valo5e3dd152013-06-12 20:52:10 +03003850 ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003851 if (ret == 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02003852 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03003853 ret = -ETIMEDOUT;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003854 } else if (ret > 0) {
3855 ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003856 }
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003857
3858out:
3859 /* Scan state should be updated upon scan completion but in case
3860 * firmware fails to deliver the event (for whatever reason) it is
3861 * desired to clean up scan state anyway. Firmware may have just
3862 * dropped the scan completion event delivery due to transport pipe
3863	 * being overflowed with data and/or it can recover on its own before
3864	 * the next scan request is submitted.
3865 */
3866 spin_lock_bh(&ar->data_lock);
3867 if (ar->scan.state != ATH10K_SCAN_IDLE)
3868 __ath10k_scan_finish(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003869 spin_unlock_bh(&ar->data_lock);
3870
3871 return ret;
3872}
3873
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003874static void ath10k_scan_abort(struct ath10k *ar)
3875{
3876 int ret;
3877
3878 lockdep_assert_held(&ar->conf_mutex);
3879
3880 spin_lock_bh(&ar->data_lock);
3881
3882 switch (ar->scan.state) {
3883 case ATH10K_SCAN_IDLE:
3884		/* This can happen if the timeout worker kicked in and requested
3885		 * an abort while the scan completion was being processed.
3886 */
3887 break;
3888 case ATH10K_SCAN_STARTING:
3889 case ATH10K_SCAN_ABORTING:
Michal Kazior7aa7a722014-08-25 12:09:38 +02003890 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003891 ath10k_scan_state_str(ar->scan.state),
3892 ar->scan.state);
3893 break;
3894 case ATH10K_SCAN_RUNNING:
3895 ar->scan.state = ATH10K_SCAN_ABORTING;
3896 spin_unlock_bh(&ar->data_lock);
3897
3898 ret = ath10k_scan_stop(ar);
3899 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003900 ath10k_warn(ar, "failed to abort scan: %d\n", ret);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003901
3902 spin_lock_bh(&ar->data_lock);
3903 break;
3904 }
3905
3906 spin_unlock_bh(&ar->data_lock);
3907}
3908
3909void ath10k_scan_timeout_work(struct work_struct *work)
3910{
3911 struct ath10k *ar = container_of(work, struct ath10k,
3912 scan.timeout.work);
3913
3914 mutex_lock(&ar->conf_mutex);
3915 ath10k_scan_abort(ar);
3916 mutex_unlock(&ar->conf_mutex);
3917}
3918
Kalle Valo5e3dd152013-06-12 20:52:10 +03003919static int ath10k_start_scan(struct ath10k *ar,
3920 const struct wmi_start_scan_arg *arg)
3921{
3922 int ret;
3923
3924 lockdep_assert_held(&ar->conf_mutex);
3925
3926 ret = ath10k_wmi_start_scan(ar, arg);
3927 if (ret)
3928 return ret;
3929
Kalle Valo5e3dd152013-06-12 20:52:10 +03003930 ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
3931 if (ret == 0) {
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003932 ret = ath10k_scan_stop(ar);
3933 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02003934 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02003935
3936 return -ETIMEDOUT;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003937 }
3938
Ben Greear2f9eec02015-02-15 16:50:38 +02003939	/* If we failed to start the scan, return the error code at
3940 * this point. This is probably due to some issue in the
3941 * firmware, but no need to wedge the driver due to that...
3942 */
3943 spin_lock_bh(&ar->data_lock);
3944 if (ar->scan.state == ATH10K_SCAN_IDLE) {
3945 spin_unlock_bh(&ar->data_lock);
3946 return -EINVAL;
3947 }
3948 spin_unlock_bh(&ar->data_lock);
3949
Kalle Valo5e3dd152013-06-12 20:52:10 +03003950 return 0;
3951}
3952
3953/**********************/
3954/* mac80211 callbacks */
3955/**********************/
3956
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003957static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
3958 struct ieee80211_tx_control *control,
3959 struct sk_buff *skb)
Kalle Valo5e3dd152013-06-12 20:52:10 +03003960{
Kalle Valo5e3dd152013-06-12 20:52:10 +03003961 struct ath10k *ar = hw->priv;
Michal Kazior6421969f2016-03-06 16:14:25 +02003962 struct ath10k_htt *htt = &ar->htt;
Michal Kazior4b604552014-07-21 21:03:09 +03003963 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3964 struct ieee80211_vif *vif = info->control.vif;
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03003965 struct ieee80211_sta *sta = control->sta;
Michal Kaziordd4717b2016-03-06 16:14:39 +02003966 struct ieee80211_txq *txq = NULL;
Michal Kazior6421969f2016-03-06 16:14:25 +02003967 struct ieee80211_hdr *hdr = (void *)skb->data;
Michal Kazior8a933962015-11-18 06:59:17 +01003968 enum ath10k_hw_txrx_mode txmode;
Michal Kazior6421969f2016-03-06 16:14:25 +02003969 enum ath10k_mac_tx_path txpath;
3970 bool is_htt;
3971 bool is_mgmt;
3972 bool is_presp;
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01003973 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03003974
Michal Kaziordd4717b2016-03-06 16:14:39 +02003975 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003976
Michal Kazior8a933962015-11-18 06:59:17 +01003977 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
Michal Kazior6421969f2016-03-06 16:14:25 +02003978 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3979 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
3980 txpath == ATH10K_MAC_TX_HTT_MGMT);
Kalle Valo5e3dd152013-06-12 20:52:10 +03003981
Michal Kazior6421969f2016-03-06 16:14:25 +02003982 if (is_htt) {
3983 spin_lock_bh(&ar->htt.tx_lock);
3984
3985 is_mgmt = ieee80211_is_mgmt(hdr->frame_control);
3986 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3987
3988 ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp);
3989 if (ret) {
3990 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
3991 ret);
3992 spin_unlock_bh(&ar->htt.tx_lock);
3993 ieee80211_free_txskb(ar->hw, skb);
3994 return;
3995 }
3996
3997 spin_unlock_bh(&ar->htt.tx_lock);
3998 }
3999
4000 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
4001 if (ret) {
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01004002 ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
Michal Kazior6421969f2016-03-06 16:14:25 +02004003 if (is_htt) {
4004 spin_lock_bh(&ar->htt.tx_lock);
4005 ath10k_htt_tx_dec_pending(htt, is_mgmt);
4006 spin_unlock_bh(&ar->htt.tx_lock);
4007 }
4008 return;
4009 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004010}
4011
Michal Kazior29946872016-03-06 16:14:34 +02004012static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4013 struct ieee80211_txq *txq)
4014{
4015 struct ath10k *ar = hw->priv;
4016 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4017
4018 if (ath10k_mac_tx_can_push(hw, txq)) {
4019 spin_lock_bh(&ar->txqs_lock);
4020 if (list_empty(&artxq->list))
4021 list_add_tail(&artxq->list, &ar->txqs);
4022 spin_unlock_bh(&ar->txqs_lock);
4023
4024 tasklet_schedule(&ar->htt.txrx_compl_task);
4025 }
Michal Kaziorc1a43d92016-03-06 16:14:36 +02004026
4027 ath10k_htt_tx_txq_update(hw, txq);
Michal Kazior29946872016-03-06 16:14:34 +02004028}
4029
Michal Kaziorbca7baf2014-05-26 12:46:03 +03004030/* Must not be called with conf_mutex held as workers can use that also. */
Michal Kazior7962b0d2014-10-28 10:34:38 +01004031void ath10k_drain_tx(struct ath10k *ar)
Michal Kaziorbca7baf2014-05-26 12:46:03 +03004032{
4033 /* make sure rcu-protected mac80211 tx path itself is drained */
4034 synchronize_net();
4035
4036 ath10k_offchan_tx_purge(ar);
4037 ath10k_mgmt_over_wmi_tx_purge(ar);
4038
4039 cancel_work_sync(&ar->offchan_tx_work);
4040 cancel_work_sync(&ar->wmi_mgmt_tx_work);
4041}
4042
Michal Kazioraffd3212013-07-16 09:54:35 +02004043void ath10k_halt(struct ath10k *ar)
Michal Kazior818bdd12013-07-16 09:38:57 +02004044{
Michal Kaziord9bc4b92014-04-23 19:30:06 +03004045 struct ath10k_vif *arvif;
4046
Michal Kazior818bdd12013-07-16 09:38:57 +02004047 lockdep_assert_held(&ar->conf_mutex);
4048
Michal Kazior19337472014-08-28 12:58:16 +02004049 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4050 ar->filter_flags = 0;
4051 ar->monitor = false;
Michal Kazior500ff9f2015-03-31 10:26:21 +00004052 ar->monitor_arvif = NULL;
Michal Kazior19337472014-08-28 12:58:16 +02004053
4054 if (ar->monitor_started)
Michal Kazior1bbc0972014-04-08 09:45:47 +03004055 ath10k_monitor_stop(ar);
Michal Kazior19337472014-08-28 12:58:16 +02004056
4057 ar->monitor_started = false;
Michal Kazior96d828d2015-03-31 10:26:23 +00004058 ar->tx_paused = 0;
Michal Kazior1bbc0972014-04-08 09:45:47 +03004059
Michal Kazior5c81c7f2014-08-05 14:54:44 +02004060 ath10k_scan_finish(ar);
Michal Kazior818bdd12013-07-16 09:38:57 +02004061 ath10k_peer_cleanup_all(ar);
4062 ath10k_core_stop(ar);
4063 ath10k_hif_power_down(ar);
4064
4065 spin_lock_bh(&ar->data_lock);
Michal Kazior64badcb2014-09-18 11:18:02 +03004066 list_for_each_entry(arvif, &ar->arvifs, list)
4067 ath10k_mac_vif_beacon_cleanup(arvif);
Michal Kazior818bdd12013-07-16 09:38:57 +02004068 spin_unlock_bh(&ar->data_lock);
4069}
4070
Ben Greear46acf7b2014-05-16 17:15:38 +03004071static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4072{
4073 struct ath10k *ar = hw->priv;
4074
4075 mutex_lock(&ar->conf_mutex);
4076
Rajkumar Manoharan166de3f2015-10-27 17:51:11 +05304077 *tx_ant = ar->cfg_tx_chainmask;
4078 *rx_ant = ar->cfg_rx_chainmask;
Ben Greear46acf7b2014-05-16 17:15:38 +03004079
4080 mutex_unlock(&ar->conf_mutex);
4081
4082 return 0;
4083}
4084
Ben Greear5572a952014-11-24 16:22:10 +02004085static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4086{
4087	/* It is not clear that allowing gaps in the chainmask
4088	 * is helpful. Probably it will not do what the user
4089	 * is hoping for, so warn in that case.
4090 */
4091 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4092 return;
4093
4094 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
4095 dbg, cm);
4096}
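/* A minimal sketch (hypothetical helper, not used by the driver): the values
 * accepted silently above (15, 7, 3, 1, 0) are the gap-free chainmasks for up
 * to four chains. A generic "no gaps" test for such masks could look like:
 */
#if 0
static bool ath10k_chainmask_is_gap_free(u32 cm)
{
	/* Masks of contiguous low bits (0x0, 0x1, 0x3, 0x7, 0xf, ...) satisfy
	 * cm & (cm + 1) == 0.
	 */
	return (cm & (cm + 1)) == 0;
}
#endif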
4097
Rajkumar Manoharanf58512f2015-10-27 17:51:13 +05304098static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4099{
4100 int nsts = ar->vht_cap_info;
4101
4102 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4103 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4104
4105	/* If the firmware does not deliver the number of supported space-time
4106	 * streams to the host, assume it supports up to 4 BF STS and return
4107	 * the corresponding VHT CAP value (nsts - 1).
4108 */
4109 if (nsts == 0)
4110 return 3;
4111
4112 return nsts;
4113}
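/* Worked example (descriptive only): the VHT beamformee STS field encodes
 * "maximum number of space-time streams minus one", so a firmware-reported
 * field value of 2 is returned as-is and means up to 3 STS, while a value of
 * 0 is treated as "not provided" and the default of 3 (i.e. up to 4 STS) is
 * used instead.
 */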
4114
4115static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4116{
4117 int sound_dim = ar->vht_cap_info;
4118
4119 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4120 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4121
4122 /* If the sounding dimension is not advertised by the firmware,
4123 * let's use a default value of 1
4124 */
4125 if (sound_dim == 0)
4126 return 1;
4127
4128 return sound_dim;
4129}
4130
4131static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4132{
4133 struct ieee80211_sta_vht_cap vht_cap = {0};
4134 u16 mcs_map;
4135 u32 val;
4136 int i;
4137
4138 vht_cap.vht_supported = 1;
4139 vht_cap.cap = ar->vht_cap_info;
4140
4141 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4142 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4143 val = ath10k_mac_get_vht_cap_bf_sts(ar);
4144 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4145 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4146
4147 vht_cap.cap |= val;
4148 }
4149
4150 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4151 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4152 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4153 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4154 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4155
4156 vht_cap.cap |= val;
4157 }
4158
4159 mcs_map = 0;
4160 for (i = 0; i < 8; i++) {
4161 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4162 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4163 else
4164 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4165 }
4166
4167 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4168 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4169
4170 return vht_cap;
4171}
4172
4173static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4174{
4175 int i;
4176 struct ieee80211_sta_ht_cap ht_cap = {0};
4177
4178 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4179 return ht_cap;
4180
4181 ht_cap.ht_supported = 1;
4182 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4183 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4184 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4185 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
Peter Ohe33a99e2015-12-31 15:26:20 +02004186 ht_cap.cap |=
4187 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
Rajkumar Manoharanf58512f2015-10-27 17:51:13 +05304188
4189 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4190 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4191
4192 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4193 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4194
4195 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4196 u32 smps;
4197
4198 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4199 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4200
4201 ht_cap.cap |= smps;
4202 }
4203
4204 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
4205 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4206
4207 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4208 u32 stbc;
4209
4210 stbc = ar->ht_cap_info;
4211 stbc &= WMI_HT_CAP_RX_STBC;
4212 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4213 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4214 stbc &= IEEE80211_HT_CAP_RX_STBC;
4215
4216 ht_cap.cap |= stbc;
4217 }
4218
4219 if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4220 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4221
4222 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4223 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4224
4225 /* max AMSDU is implicitly taken from vht_cap_info */
4226 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4227 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4228
4229 for (i = 0; i < ar->num_rf_chains; i++) {
4230 if (ar->cfg_rx_chainmask & BIT(i))
4231 ht_cap.mcs.rx_mask[i] = 0xFF;
4232 }
4233
4234 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4235
4236 return ht_cap;
4237}
4238
Rajkumar Manoharan5036fe02015-10-27 17:51:14 +05304239static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4240{
4241 struct ieee80211_supported_band *band;
4242 struct ieee80211_sta_vht_cap vht_cap;
4243 struct ieee80211_sta_ht_cap ht_cap;
4244
4245 ht_cap = ath10k_get_ht_cap(ar);
4246 vht_cap = ath10k_create_vht_cap(ar);
4247
4248 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4249 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
4250 band->ht_cap = ht_cap;
4251
4252		/* Enable VHT support at 2.4 GHz */
4253 band->vht_cap = vht_cap;
4254 }
4255 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4256 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
4257 band->ht_cap = ht_cap;
4258 band->vht_cap = vht_cap;
4259 }
4260}
4261
Ben Greear46acf7b2014-05-16 17:15:38 +03004262static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4263{
4264 int ret;
4265
4266 lockdep_assert_held(&ar->conf_mutex);
4267
Ben Greear5572a952014-11-24 16:22:10 +02004268 ath10k_check_chain_mask(ar, tx_ant, "tx");
4269 ath10k_check_chain_mask(ar, rx_ant, "rx");
4270
Ben Greear46acf7b2014-05-16 17:15:38 +03004271 ar->cfg_tx_chainmask = tx_ant;
4272 ar->cfg_rx_chainmask = rx_ant;
4273
4274 if ((ar->state != ATH10K_STATE_ON) &&
4275 (ar->state != ATH10K_STATE_RESTARTED))
4276 return 0;
4277
4278 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4279 tx_ant);
4280 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004281 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
Ben Greear46acf7b2014-05-16 17:15:38 +03004282 ret, tx_ant);
4283 return ret;
4284 }
4285
4286 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4287 rx_ant);
4288 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004289 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
Ben Greear46acf7b2014-05-16 17:15:38 +03004290 ret, rx_ant);
4291 return ret;
4292 }
4293
Rajkumar Manoharan5036fe02015-10-27 17:51:14 +05304294 /* Reload HT/VHT capability */
4295 ath10k_mac_setup_ht_vht_cap(ar);
4296
Ben Greear46acf7b2014-05-16 17:15:38 +03004297 return 0;
4298}
4299
4300static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4301{
4302 struct ath10k *ar = hw->priv;
4303 int ret;
4304
4305 mutex_lock(&ar->conf_mutex);
4306 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4307 mutex_unlock(&ar->conf_mutex);
4308 return ret;
4309}
4310
Kalle Valo5e3dd152013-06-12 20:52:10 +03004311static int ath10k_start(struct ieee80211_hw *hw)
4312{
4313 struct ath10k *ar = hw->priv;
Mohammed Shafi Shajakhan1fe374f2016-01-13 21:16:30 +05304314 u32 param;
Michal Kazior818bdd12013-07-16 09:38:57 +02004315 int ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004316
Michal Kaziorbca7baf2014-05-26 12:46:03 +03004317 /*
4318	 * This makes sense only when restarting hw. It is harmless to call it
4319	 * unconditionally. This is necessary to make sure no HTT/WMI tx
4320 * commands will be submitted while restarting.
4321 */
4322 ath10k_drain_tx(ar);
4323
Michal Kazior548db542013-07-05 16:15:15 +03004324 mutex_lock(&ar->conf_mutex);
4325
Michal Kaziorc5058f52014-05-26 12:46:03 +03004326 switch (ar->state) {
4327 case ATH10K_STATE_OFF:
4328 ar->state = ATH10K_STATE_ON;
4329 break;
4330 case ATH10K_STATE_RESTARTING:
4331 ath10k_halt(ar);
4332 ar->state = ATH10K_STATE_RESTARTED;
4333 break;
4334 case ATH10K_STATE_ON:
4335 case ATH10K_STATE_RESTARTED:
4336 case ATH10K_STATE_WEDGED:
4337 WARN_ON(1);
Michal Kazior818bdd12013-07-16 09:38:57 +02004338 ret = -EINVAL;
Michal Kaziorae254432014-05-26 12:46:02 +03004339 goto err;
Kalle Valo43d2a302014-09-10 18:23:30 +03004340 case ATH10K_STATE_UTF:
4341 ret = -EBUSY;
4342 goto err;
Michal Kazior818bdd12013-07-16 09:38:57 +02004343 }
4344
4345 ret = ath10k_hif_power_up(ar);
4346 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004347 ath10k_err(ar, "Could not init hif: %d\n", ret);
Michal Kaziorae254432014-05-26 12:46:02 +03004348 goto err_off;
Michal Kazior818bdd12013-07-16 09:38:57 +02004349 }
4350
Kalle Valo43d2a302014-09-10 18:23:30 +03004351 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
Michal Kazior818bdd12013-07-16 09:38:57 +02004352 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004353 ath10k_err(ar, "Could not init core: %d\n", ret);
Michal Kaziorae254432014-05-26 12:46:02 +03004354 goto err_power_down;
Michal Kazior818bdd12013-07-16 09:38:57 +02004355 }
4356
Mohammed Shafi Shajakhan1fe374f2016-01-13 21:16:30 +05304357 param = ar->wmi.pdev_param->pmf_qos;
4358 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
Michal Kaziorae254432014-05-26 12:46:02 +03004359 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004360 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
Michal Kaziorae254432014-05-26 12:46:02 +03004361 goto err_core_stop;
4362 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004363
Mohammed Shafi Shajakhan1fe374f2016-01-13 21:16:30 +05304364 param = ar->wmi.pdev_param->dynamic_bw;
4365 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
Michal Kaziorae254432014-05-26 12:46:02 +03004366 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004367 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
Michal Kaziorae254432014-05-26 12:46:02 +03004368 goto err_core_stop;
4369 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004370
Michal Kaziorcf327842015-03-31 10:26:25 +00004371 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4372 ret = ath10k_wmi_adaptive_qcs(ar, true);
4373 if (ret) {
4374 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4375 ret);
4376 goto err_core_stop;
4377 }
4378 }
4379
Janusz Dziedzic24ab13e2015-04-01 22:53:18 +03004380 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
Mohammed Shafi Shajakhan1fe374f2016-01-13 21:16:30 +05304381 param = ar->wmi.pdev_param->burst_enable;
4382 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
Janusz Dziedzic24ab13e2015-04-01 22:53:18 +03004383 if (ret) {
4384 ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4385 goto err_core_stop;
4386 }
4387 }
4388
Rajkumar Manoharan166de3f2015-10-27 17:51:11 +05304389 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
Ben Greear46acf7b2014-05-16 17:15:38 +03004390
Marek Puzyniakab6258e2014-01-29 15:03:31 +02004391 /*
4392	 * By default FW sets the ARP frames' AC to voice (6). In that case ARP
4393	 * exchange does not work properly for a UAPSD-enabled AP. ARP requests
4394	 * which arrive with access category 0 are processed by the network
4395	 * stack and sent back with access category 0, but FW changes the
4396	 * access category to 6. Setting the ARP frames' access category to
4397	 * best effort (0) solves this problem.
4398 */
4399
Mohammed Shafi Shajakhan1fe374f2016-01-13 21:16:30 +05304400 param = ar->wmi.pdev_param->arp_ac_override;
4401 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
Marek Puzyniakab6258e2014-01-29 15:03:31 +02004402 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004403 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
Marek Puzyniakab6258e2014-01-29 15:03:31 +02004404 ret);
Michal Kaziorae254432014-05-26 12:46:02 +03004405 goto err_core_stop;
Marek Puzyniakab6258e2014-01-29 15:03:31 +02004406 }
4407
Maharaja62f77f02015-10-21 11:49:18 +03004408 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4409 ar->fw_features)) {
4410 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4411 WMI_CCA_DETECT_LEVEL_AUTO,
4412 WMI_CCA_DETECT_MARGIN_AUTO);
4413 if (ret) {
4414 ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4415 ret);
4416 goto err_core_stop;
4417 }
4418 }
4419
Mohammed Shafi Shajakhan1fe374f2016-01-13 21:16:30 +05304420 param = ar->wmi.pdev_param->ani_enable;
4421 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
Ashok Raj Nagarajan575f1c32015-03-19 16:37:59 +05304422 if (ret) {
4423 ath10k_warn(ar, "failed to enable ani by default: %d\n",
4424 ret);
4425 goto err_core_stop;
4426 }
4427
Ashok Raj Nagarajanb3e71d72015-03-19 16:38:00 +05304428 ar->ani_enabled = true;
4429
Mohammed Shafi Shajakhan8351c052016-01-13 21:16:33 +05304430 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
4431 param = ar->wmi.pdev_param->peer_stats_update_period;
4432 ret = ath10k_wmi_pdev_set_param(ar, param,
4433 PEER_DEFAULT_STATS_UPDATE_PERIOD);
4434 if (ret) {
4435 ath10k_warn(ar,
 4436				    "failed to set peer stats period: %d\n",
4437 ret);
4438 goto err_core_stop;
4439 }
4440 }
4441
Michal Kaziord6500972014-04-08 09:56:09 +03004442 ar->num_started_vdevs = 0;
Michal Kaziorf7843d72013-07-16 09:38:52 +02004443 ath10k_regd_update(ar);
4444
Simon Wunderlich855aed12014-08-02 09:12:54 +03004445 ath10k_spectral_start(ar);
Rajkumar Manoharan8515b5c2015-03-15 20:36:22 +05304446 ath10k_thermal_set_throttling(ar);
Simon Wunderlich855aed12014-08-02 09:12:54 +03004447
Michal Kaziorae254432014-05-26 12:46:02 +03004448 mutex_unlock(&ar->conf_mutex);
4449 return 0;
4450
4451err_core_stop:
4452 ath10k_core_stop(ar);
4453
4454err_power_down:
4455 ath10k_hif_power_down(ar);
4456
4457err_off:
4458 ar->state = ATH10K_STATE_OFF;
4459
4460err:
Michal Kazior548db542013-07-05 16:15:15 +03004461 mutex_unlock(&ar->conf_mutex);
Michal Kaziorc60bdd82014-01-29 07:26:31 +01004462 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004463}
4464
4465static void ath10k_stop(struct ieee80211_hw *hw)
4466{
4467 struct ath10k *ar = hw->priv;
4468
Michal Kaziorbca7baf2014-05-26 12:46:03 +03004469 ath10k_drain_tx(ar);
4470
Michal Kazior548db542013-07-05 16:15:15 +03004471 mutex_lock(&ar->conf_mutex);
Michal Kaziorc5058f52014-05-26 12:46:03 +03004472 if (ar->state != ATH10K_STATE_OFF) {
Michal Kazior818bdd12013-07-16 09:38:57 +02004473 ath10k_halt(ar);
Michal Kaziorc5058f52014-05-26 12:46:03 +03004474 ar->state = ATH10K_STATE_OFF;
4475 }
Michal Kazior548db542013-07-05 16:15:15 +03004476 mutex_unlock(&ar->conf_mutex);
4477
Michal Kazior5c81c7f2014-08-05 14:54:44 +02004478 cancel_delayed_work_sync(&ar->scan.timeout);
Michal Kazioraffd3212013-07-16 09:54:35 +02004479 cancel_work_sync(&ar->restart_work);
4480}
4481
Michal Kaziorad088bf2013-10-16 15:44:46 +03004482static int ath10k_config_ps(struct ath10k *ar)
Michal Kazioraffd3212013-07-16 09:54:35 +02004483{
Michal Kaziorad088bf2013-10-16 15:44:46 +03004484 struct ath10k_vif *arvif;
4485 int ret = 0;
Michal Kazioraffd3212013-07-16 09:54:35 +02004486
4487 lockdep_assert_held(&ar->conf_mutex);
4488
Michal Kaziorad088bf2013-10-16 15:44:46 +03004489 list_for_each_entry(arvif, &ar->arvifs, list) {
4490 ret = ath10k_mac_vif_setup_ps(arvif);
4491 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004492 ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
Michal Kaziorad088bf2013-10-16 15:44:46 +03004493 break;
4494 }
4495 }
Michal Kazioraffd3212013-07-16 09:54:35 +02004496
Michal Kaziorad088bf2013-10-16 15:44:46 +03004497 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004498}
4499
Michal Kazior7d9d5582014-10-21 10:40:15 +03004500static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4501{
4502 int ret;
4503 u32 param;
4504
4505 lockdep_assert_held(&ar->conf_mutex);
4506
4507 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4508
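	/* mac80211 hands us txpower in dBm; the firmware limit parameters
	 * appear to take 0.5 dBm units, hence the multiplication by 2 below.
	 */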
4509 param = ar->wmi.pdev_param->txpower_limit2g;
4510 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4511 if (ret) {
4512 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4513 txpower, ret);
4514 return ret;
4515 }
4516
4517 param = ar->wmi.pdev_param->txpower_limit5g;
4518 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4519 if (ret) {
4520 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4521 txpower, ret);
4522 return ret;
4523 }
4524
4525 return 0;
4526}
4527
4528static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4529{
4530 struct ath10k_vif *arvif;
4531 int ret, txpower = -1;
4532
4533 lockdep_assert_held(&ar->conf_mutex);
4534
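	/* Tx power is a device-wide (pdev) limit, so apply the most
	 * restrictive (lowest) value requested across all active vifs.
	 */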
4535 list_for_each_entry(arvif, &ar->arvifs, list) {
4536 WARN_ON(arvif->txpower < 0);
4537
4538 if (txpower == -1)
4539 txpower = arvif->txpower;
4540 else
4541 txpower = min(txpower, arvif->txpower);
4542 }
4543
4544 if (WARN_ON(txpower == -1))
4545 return -EINVAL;
4546
4547 ret = ath10k_mac_txpower_setup(ar, txpower);
4548 if (ret) {
4549 ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4550 txpower, ret);
4551 return ret;
4552 }
4553
4554 return 0;
4555}
4556
Kalle Valo5e3dd152013-06-12 20:52:10 +03004557static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4558{
Kalle Valo5e3dd152013-06-12 20:52:10 +03004559 struct ath10k *ar = hw->priv;
4560 struct ieee80211_conf *conf = &hw->conf;
4561 int ret = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004562
4563 mutex_lock(&ar->conf_mutex);
4564
Michal Kazioraffd3212013-07-16 09:54:35 +02004565 if (changed & IEEE80211_CONF_CHANGE_PS)
4566 ath10k_config_ps(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004567
4568 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
Michal Kazior19337472014-08-28 12:58:16 +02004569 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4570 ret = ath10k_monitor_recalc(ar);
4571 if (ret)
4572 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004573 }
4574
4575 mutex_unlock(&ar->conf_mutex);
4576 return ret;
4577}
4578
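/* Derive the number of spatial streams (NSS) from a chainmask: 0xf -> 4,
 * 0x7 -> 3, 0x3 -> 2 and anything else (e.g. a non-contiguous mask such as
 * 0x5) falls back to 1.
 */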
Ben Greear5572a952014-11-24 16:22:10 +02004579static u32 get_nss_from_chainmask(u16 chain_mask)
4580{
Rajkumar Manoharanf680f702015-11-03 11:51:33 +05304581 if ((chain_mask & 0xf) == 0xf)
Ben Greear5572a952014-11-24 16:22:10 +02004582 return 4;
4583 else if ((chain_mask & 0x7) == 0x7)
4584 return 3;
4585 else if ((chain_mask & 0x3) == 0x3)
4586 return 2;
4587 return 1;
4588}
4589
Vivek Natarajana48e2cc2015-08-04 10:45:12 +05304590static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4591{
4592 u32 value = 0;
4593 struct ath10k *ar = arvif->ar;
Bartosz Markowski707a0c82015-09-02 13:20:19 +02004594 int nsts;
Bartosz Markowski0c6d6f22015-09-02 13:20:20 +02004595 int sound_dim;
Vivek Natarajana48e2cc2015-08-04 10:45:12 +05304596
4597 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4598 return 0;
4599
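	/* Build the vdev TXBF parameter word from the advertised VHT
	 * capabilities: beamformee STS count, beamformer sounding dimensions
	 * and the SU/MU beamformer/beamformee roles.
	 */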
Bartosz Markowski707a0c82015-09-02 13:20:19 +02004600 nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
Vivek Natarajana48e2cc2015-08-04 10:45:12 +05304601 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4602 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
Bartosz Markowski707a0c82015-09-02 13:20:19 +02004603 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
Vivek Natarajana48e2cc2015-08-04 10:45:12 +05304604
Bartosz Markowski0c6d6f22015-09-02 13:20:20 +02004605 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
Vivek Natarajana48e2cc2015-08-04 10:45:12 +05304606 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4607 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
Bartosz Markowski0c6d6f22015-09-02 13:20:20 +02004608 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
Vivek Natarajana48e2cc2015-08-04 10:45:12 +05304609
4610 if (!value)
4611 return 0;
4612
4613 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4614 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4615
4616 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4617 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4618 WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4619
4620 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4621 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4622
4623 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4624 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4625 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4626
4627 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4628 ar->wmi.vdev_param->txbf, value);
4629}
4630
Kalle Valo5e3dd152013-06-12 20:52:10 +03004631/*
4632 * TODO:
4633 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4634 * because we will send mgmt frames without CCK. This requirement
4635 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4636 * in the TX packet.
4637 */
4638static int ath10k_add_interface(struct ieee80211_hw *hw,
4639 struct ieee80211_vif *vif)
4640{
4641 struct ath10k *ar = hw->priv;
4642 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kaziorbb8f0c62016-03-06 16:14:27 +02004643 struct ath10k_peer *peer;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004644 enum wmi_sta_powersave_param param;
4645 int ret = 0;
Kalle Valo5a13e762014-01-20 11:01:46 +02004646 u32 value;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004647 int bit;
Michal Kazior96d828d2015-03-31 10:26:23 +00004648 int i;
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02004649 u32 vdev_param;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004650
Johannes Berg848955c2014-11-11 12:48:42 +01004651 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4652
Kalle Valo5e3dd152013-06-12 20:52:10 +03004653 mutex_lock(&ar->conf_mutex);
4654
Michal Kazior0dbd09e2013-07-31 10:55:14 +02004655 memset(arvif, 0, sizeof(*arvif));
Michal Kazior29946872016-03-06 16:14:34 +02004656 ath10k_mac_txq_init(vif->txq);
Michal Kazior0dbd09e2013-07-31 10:55:14 +02004657
Kalle Valo5e3dd152013-06-12 20:52:10 +03004658 arvif->ar = ar;
4659 arvif->vif = vif;
4660
Ben Greeare63b33f2013-10-22 14:54:14 -07004661 INIT_LIST_HEAD(&arvif->list);
Michal Kazior81a9a172015-03-05 16:02:17 +02004662 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
Michal Kaziorcc9904e2015-03-10 16:22:01 +02004663 INIT_DELAYED_WORK(&arvif->connection_loss_work,
4664 ath10k_mac_vif_sta_connection_loss_work);
Michal Kaziorcc4827b2013-10-16 15:44:45 +03004665
Michal Kazior45c9abc2015-04-21 20:42:58 +03004666 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4667 arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4668 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4669 sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4670 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4671 sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4672 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004673
Michal Kaziore04cafb2015-08-05 12:15:24 +02004674 if (ar->num_peers >= ar->max_num_peers) {
4675 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
Michal Kazior503422d2015-08-19 13:08:53 +02004676 ret = -ENOBUFS;
4677 goto err;
Michal Kaziore04cafb2015-08-05 12:15:24 +02004678 }
4679
Ben Greeara9aefb32014-08-12 11:02:19 +03004680 if (ar->free_vdev_map == 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004681 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03004682 ret = -EBUSY;
Michal Kazior9dad14a2013-10-16 15:44:45 +03004683 goto err;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004684 }
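	/* free_vdev_map is a bitmap of unused vdev ids; pick the lowest free
	 * bit as the id for the new interface.
	 */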
Ben Greear16c11172014-09-23 14:17:16 -07004685 bit = __ffs64(ar->free_vdev_map);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004686
Ben Greear16c11172014-09-23 14:17:16 -07004687 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4688 bit, ar->free_vdev_map);
4689
4690 arvif->vdev_id = bit;
Peter Oh6e4de1a2016-01-28 13:54:10 -08004691 arvif->vdev_subtype =
4692 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004693
Kalle Valo5e3dd152013-06-12 20:52:10 +03004694 switch (vif->type) {
Michal Kazior75d2bd42014-12-12 12:41:39 +01004695 case NL80211_IFTYPE_P2P_DEVICE:
4696 arvif->vdev_type = WMI_VDEV_TYPE_STA;
Peter Oh6e4de1a2016-01-28 13:54:10 -08004697 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4698 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
Michal Kazior75d2bd42014-12-12 12:41:39 +01004699 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004700 case NL80211_IFTYPE_UNSPECIFIED:
4701 case NL80211_IFTYPE_STATION:
4702 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4703 if (vif->p2p)
Peter Oh6e4de1a2016-01-28 13:54:10 -08004704 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4705 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004706 break;
4707 case NL80211_IFTYPE_ADHOC:
4708 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
4709 break;
Bob Copelandb6c7baf2015-09-09 12:47:36 -04004710 case NL80211_IFTYPE_MESH_POINT:
Peter Oh0b3d76e2016-01-28 13:54:07 -08004711 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
Peter Oh6e4de1a2016-01-28 13:54:10 -08004712 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4713 (ar, WMI_VDEV_SUBTYPE_MESH_11S);
Peter Ohbb58b892015-11-24 09:37:35 -08004714 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
Bob Copelandb6c7baf2015-09-09 12:47:36 -04004715 ret = -EINVAL;
4716 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
4717 goto err;
4718 }
4719 arvif->vdev_type = WMI_VDEV_TYPE_AP;
4720 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004721 case NL80211_IFTYPE_AP:
4722 arvif->vdev_type = WMI_VDEV_TYPE_AP;
4723
4724 if (vif->p2p)
Peter Oh6e4de1a2016-01-28 13:54:10 -08004725 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4726 (ar, WMI_VDEV_SUBTYPE_P2P_GO);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004727 break;
4728 case NL80211_IFTYPE_MONITOR:
4729 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
4730 break;
4731 default:
4732 WARN_ON(1);
4733 break;
4734 }
4735
Michal Kazior96d828d2015-03-31 10:26:23 +00004736 /* Using vdev_id as queue number will make it very easy to do per-vif
4737 * tx queue locking. This shouldn't wrap due to interface combinations
4738	 * but do a modulo for correctness' sake to prevent using the offchannel tx
4739 * queues for regular vif tx.
4740 */
4741 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4742 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
4743 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4744
Michal Kazior64badcb2014-09-18 11:18:02 +03004745 /* Some firmware revisions don't wait for beacon tx completion before
4746 * sending another SWBA event. This could lead to hardware using old
4747 * (freed) beacon data in some cases, e.g. tx credit starvation
4748 * combined with missed TBTT. This is very very rare.
4749 *
4750 * On non-IOMMU-enabled hosts this could be a possible security issue
4751 * because hw could beacon some random data on the air. On
4752 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
4753 * device would crash.
4754 *
4755	 * Since no beacon tx completions (neither implicit nor explicit) are
4756	 * propagated to the host, the only workaround for this is to allocate a
4757	 * DMA-coherent buffer for the lifetime of a vif and use it for all
4758 * beacon tx commands. Worst case for this approach is some beacons may
4759 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
4760 */
4761 if (vif->type == NL80211_IFTYPE_ADHOC ||
Bob Copelandb6c7baf2015-09-09 12:47:36 -04004762 vif->type == NL80211_IFTYPE_MESH_POINT ||
Michal Kazior64badcb2014-09-18 11:18:02 +03004763 vif->type == NL80211_IFTYPE_AP) {
4764 arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
4765 IEEE80211_MAX_FRAME_LEN,
4766 &arvif->beacon_paddr,
Rajkumar Manoharan82d7aba2014-10-10 17:38:27 +05304767 GFP_ATOMIC);
Michal Kazior64badcb2014-09-18 11:18:02 +03004768 if (!arvif->beacon_buf) {
4769 ret = -ENOMEM;
4770 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
4771 ret);
4772 goto err;
4773 }
4774 }
David Liuccec9032015-07-24 20:25:32 +03004775 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
4776 arvif->nohwcrypt = true;
4777
4778 if (arvif->nohwcrypt &&
4779 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4780		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
		ret = -EINVAL;
4781		goto err;
4782 }
Michal Kazior64badcb2014-09-18 11:18:02 +03004783
4784 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
4785 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
4786 arvif->beacon_buf ? "single-buf" : "per-skb");
Kalle Valo5e3dd152013-06-12 20:52:10 +03004787
4788 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
4789 arvif->vdev_subtype, vif->addr);
4790 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004791 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02004792 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004793 goto err;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004794 }
4795
Ben Greear16c11172014-09-23 14:17:16 -07004796 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
Michal Kazior05791192013-10-16 15:44:45 +03004797 list_add(&arvif->list, &ar->arvifs);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004798
Michal Kazior46725b152015-01-28 09:57:49 +02004799 /* It makes no sense to have firmware do keepalives. mac80211 already
4800 * takes care of this with idle connection polling.
4801 */
4802 ret = ath10k_mac_vif_disable_keepalive(arvif);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004803 if (ret) {
Michal Kazior46725b152015-01-28 09:57:49 +02004804 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02004805 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004806 goto err_vdev_delete;
4807 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004808
SenthilKumar Jegadeesan627613f2015-01-29 13:50:38 +02004809 arvif->def_wep_key_idx = -1;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004810
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02004811 vdev_param = ar->wmi.vdev_param->tx_encap_type;
4812 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03004813 ATH10K_HW_TXRX_NATIVE_WIFI);
Bartosz Markowskiebc9abd2013-10-15 09:26:20 +02004814 /* 10.X firmware does not support this VDEV parameter. Do not warn */
Michal Kazior9dad14a2013-10-16 15:44:45 +03004815 if (ret && ret != -EOPNOTSUPP) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004816 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02004817 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004818 goto err_vdev_delete;
4819 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004820
Rajkumar Manoharan8a75fc52016-03-02 20:13:52 +05304821	/* Configuring the number of spatial streams for a monitor interface
4822	 * causes a target assert on qca9888 and qca6174.
4823 */
4824 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
Ben Greear5572a952014-11-24 16:22:10 +02004825 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4826
4827 vdev_param = ar->wmi.vdev_param->nss;
4828 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4829 nss);
4830 if (ret) {
4831 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
4832 arvif->vdev_id, ar->cfg_tx_chainmask, nss,
4833 ret);
4834 goto err_vdev_delete;
4835 }
4836 }
4837
Michal Kaziore57e0572015-03-24 13:14:03 +00004838 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4839 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
Michal Kazior69427262016-03-06 16:14:30 +02004840 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4841 vif->addr, WMI_PEER_TYPE_DEFAULT);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004842 if (ret) {
Michal Kaziore57e0572015-03-24 13:14:03 +00004843 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02004844 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004845 goto err_vdev_delete;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004846 }
Michal Kaziorbb8f0c62016-03-06 16:14:27 +02004847
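		/* Look up the just-created self-peer and remember its
		 * firmware-assigned peer id in the vif.
		 */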
4848 spin_lock_bh(&ar->data_lock);
4849
4850 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4851 if (!peer) {
4852 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4853 vif->addr, arvif->vdev_id);
4854 spin_unlock_bh(&ar->data_lock);
4855 ret = -ENOENT;
4856 goto err_peer_delete;
4857 }
4858
4859 arvif->peer_id = find_first_bit(peer->peer_ids,
4860 ATH10K_MAX_NUM_PEER_IDS);
4861
4862 spin_unlock_bh(&ar->data_lock);
4863 } else {
4864 arvif->peer_id = HTT_INVALID_PEERID;
Michal Kaziore57e0572015-03-24 13:14:03 +00004865 }
Marek Puzyniakcdf07402013-12-30 09:07:51 +01004866
Michal Kaziore57e0572015-03-24 13:14:03 +00004867 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
Kalle Valo5a13e762014-01-20 11:01:46 +02004868 ret = ath10k_mac_set_kickout(arvif);
4869 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004870 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02004871 arvif->vdev_id, ret);
Kalle Valo5a13e762014-01-20 11:01:46 +02004872 goto err_peer_delete;
4873 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004874 }
4875
4876 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
4877 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
4878 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
4879 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
4880 param, value);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004881 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004882 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02004883 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004884 goto err_peer_delete;
4885 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004886
Michal Kazior9f9b5742014-12-12 12:41:36 +01004887 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004888 if (ret) {
Michal Kazior9f9b5742014-12-12 12:41:36 +01004889 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02004890 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004891 goto err_peer_delete;
4892 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004893
Michal Kazior9f9b5742014-12-12 12:41:36 +01004894 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004895 if (ret) {
Michal Kazior9f9b5742014-12-12 12:41:36 +01004896 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02004897 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004898 goto err_peer_delete;
4899 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03004900 }
4901
Vivek Natarajana48e2cc2015-08-04 10:45:12 +05304902 ret = ath10k_mac_set_txbf_conf(arvif);
4903 if (ret) {
4904 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
4905 arvif->vdev_id, ret);
4906 goto err_peer_delete;
4907 }
4908
Michal Kazior424121c2013-07-22 14:13:31 +02004909 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004910 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02004911 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
Michal Kazior679c54a2013-07-05 16:15:04 +03004912 arvif->vdev_id, ret);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004913 goto err_peer_delete;
4914 }
Michal Kazior679c54a2013-07-05 16:15:04 +03004915
Michal Kazior7d9d5582014-10-21 10:40:15 +03004916 arvif->txpower = vif->bss_conf.txpower;
4917 ret = ath10k_mac_txpower_recalc(ar);
4918 if (ret) {
4919 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
4920 goto err_peer_delete;
4921 }
4922
Michal Kazior500ff9f2015-03-31 10:26:21 +00004923 if (vif->type == NL80211_IFTYPE_MONITOR) {
4924 ar->monitor_arvif = arvif;
4925 ret = ath10k_monitor_recalc(ar);
4926 if (ret) {
4927 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4928 goto err_peer_delete;
4929 }
4930 }
4931
Michal Kazior6d2d51e2015-08-07 09:08:21 +02004932 spin_lock_bh(&ar->htt.tx_lock);
4933 if (!ar->tx_paused)
4934 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
4935 spin_unlock_bh(&ar->htt.tx_lock);
4936
Kalle Valo5e3dd152013-06-12 20:52:10 +03004937 mutex_unlock(&ar->conf_mutex);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004938 return 0;
4939
4940err_peer_delete:
Michal Kaziore57e0572015-03-24 13:14:03 +00004941 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4942 arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
Michal Kazior9dad14a2013-10-16 15:44:45 +03004943 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
4944
4945err_vdev_delete:
4946 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
Ben Greear16c11172014-09-23 14:17:16 -07004947 ar->free_vdev_map |= 1LL << arvif->vdev_id;
Michal Kazior05791192013-10-16 15:44:45 +03004948 list_del(&arvif->list);
Michal Kazior9dad14a2013-10-16 15:44:45 +03004949
4950err:
Michal Kazior64badcb2014-09-18 11:18:02 +03004951 if (arvif->beacon_buf) {
4952 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
4953 arvif->beacon_buf, arvif->beacon_paddr);
4954 arvif->beacon_buf = NULL;
4955 }
4956
Michal Kazior9dad14a2013-10-16 15:44:45 +03004957 mutex_unlock(&ar->conf_mutex);
4958
Kalle Valo5e3dd152013-06-12 20:52:10 +03004959 return ret;
4960}
4961
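/* Unlock tx for every possible pause reason bit on this vif. */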
Michal Kaziorb4aa5392015-03-31 10:26:24 +00004962static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
4963{
4964 int i;
4965
4966 for (i = 0; i < BITS_PER_LONG; i++)
4967 ath10k_mac_vif_tx_unlock(arvif, i);
4968}
4969
Kalle Valo5e3dd152013-06-12 20:52:10 +03004970static void ath10k_remove_interface(struct ieee80211_hw *hw,
4971 struct ieee80211_vif *vif)
4972{
4973 struct ath10k *ar = hw->priv;
4974 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior69427262016-03-06 16:14:30 +02004975 struct ath10k_peer *peer;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004976 int ret;
Michal Kazior69427262016-03-06 16:14:30 +02004977 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03004978
Michal Kazior81a9a172015-03-05 16:02:17 +02004979 cancel_work_sync(&arvif->ap_csa_work);
Michal Kaziorcc9904e2015-03-10 16:22:01 +02004980 cancel_delayed_work_sync(&arvif->connection_loss_work);
Michal Kazior81a9a172015-03-05 16:02:17 +02004981
Sujith Manoharan5d011f52014-11-25 11:47:00 +05304982 mutex_lock(&ar->conf_mutex);
4983
Michal Kaziored543882013-09-13 14:16:56 +02004984 spin_lock_bh(&ar->data_lock);
Michal Kazior64badcb2014-09-18 11:18:02 +03004985 ath10k_mac_vif_beacon_cleanup(arvif);
Michal Kaziored543882013-09-13 14:16:56 +02004986 spin_unlock_bh(&ar->data_lock);
4987
Simon Wunderlich855aed12014-08-02 09:12:54 +03004988 ret = ath10k_spectral_vif_stop(arvif);
4989 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02004990 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
Simon Wunderlich855aed12014-08-02 09:12:54 +03004991 arvif->vdev_id, ret);
4992
Ben Greear16c11172014-09-23 14:17:16 -07004993 ar->free_vdev_map |= 1LL << arvif->vdev_id;
Michal Kazior05791192013-10-16 15:44:45 +03004994 list_del(&arvif->list);
Kalle Valo5e3dd152013-06-12 20:52:10 +03004995
Michal Kaziore57e0572015-03-24 13:14:03 +00004996 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4997 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
Michal Kazior2c512052015-02-15 16:50:40 +02004998 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
4999 vif->addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005000 if (ret)
Michal Kaziore57e0572015-03-24 13:14:03 +00005001 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02005002 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005003
5004 kfree(arvif->u.ap.noa_data);
5005 }
5006
Michal Kazior7aa7a722014-08-25 12:09:38 +02005007 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03005008 arvif->vdev_id);
5009
Kalle Valo5e3dd152013-06-12 20:52:10 +03005010 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5011 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005012 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02005013 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005014
Michal Kazior2c512052015-02-15 16:50:40 +02005015 /* Some firmware revisions don't notify host about self-peer removal
5016 * until after associated vdev is deleted.
5017 */
Michal Kaziore57e0572015-03-24 13:14:03 +00005018 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5019 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
Michal Kazior2c512052015-02-15 16:50:40 +02005020 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5021 vif->addr);
5022 if (ret)
5023 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5024 arvif->vdev_id, ret);
5025
5026 spin_lock_bh(&ar->data_lock);
5027 ar->num_peers--;
5028 spin_unlock_bh(&ar->data_lock);
5029 }
5030
Michal Kazior69427262016-03-06 16:14:30 +02005031 spin_lock_bh(&ar->data_lock);
5032 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5033 peer = ar->peer_map[i];
5034 if (!peer)
5035 continue;
5036
5037 if (peer->vif == vif) {
5038 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5039 vif->addr, arvif->vdev_id);
5040 peer->vif = NULL;
5041 }
5042 }
5043 spin_unlock_bh(&ar->data_lock);
5044
Kalle Valo5e3dd152013-06-12 20:52:10 +03005045 ath10k_peer_cleanup(ar, arvif->vdev_id);
Michal Kaziordd4717b2016-03-06 16:14:39 +02005046 ath10k_mac_txq_unref(ar, vif->txq);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005047
Michal Kazior500ff9f2015-03-31 10:26:21 +00005048 if (vif->type == NL80211_IFTYPE_MONITOR) {
5049 ar->monitor_arvif = NULL;
5050 ret = ath10k_monitor_recalc(ar);
5051 if (ret)
5052 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5053 }
5054
Michal Kaziorb4aa5392015-03-31 10:26:24 +00005055 spin_lock_bh(&ar->htt.tx_lock);
5056 ath10k_mac_vif_tx_unlock_all(arvif);
5057 spin_unlock_bh(&ar->htt.tx_lock);
5058
Kalle Valo5e3dd152013-06-12 20:52:10 +03005061 mutex_unlock(&ar->conf_mutex);
5062}
5063
5064/*
5065 * FIXME: Has to be verified.
5066 */
5067#define SUPPORTED_FILTERS \
Johannes Bergdf140462015-04-22 14:40:58 +02005068 (FIF_ALLMULTI | \
Kalle Valo5e3dd152013-06-12 20:52:10 +03005069 FIF_CONTROL | \
5070 FIF_PSPOLL | \
5071 FIF_OTHER_BSS | \
5072 FIF_BCN_PRBRESP_PROMISC | \
5073 FIF_PROBE_REQ | \
5074 FIF_FCSFAIL)
5075
5076static void ath10k_configure_filter(struct ieee80211_hw *hw,
5077 unsigned int changed_flags,
5078 unsigned int *total_flags,
5079 u64 multicast)
5080{
5081 struct ath10k *ar = hw->priv;
5082 int ret;
5083
5084 mutex_lock(&ar->conf_mutex);
5085
5086 changed_flags &= SUPPORTED_FILTERS;
5087 *total_flags &= SUPPORTED_FILTERS;
5088 ar->filter_flags = *total_flags;
5089
Michal Kazior19337472014-08-28 12:58:16 +02005090 ret = ath10k_monitor_recalc(ar);
5091 if (ret)
5092		ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005093
5094 mutex_unlock(&ar->conf_mutex);
5095}
5096
5097static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5098 struct ieee80211_vif *vif,
5099 struct ieee80211_bss_conf *info,
5100 u32 changed)
5101{
5102 struct ath10k *ar = hw->priv;
5103 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5104 int ret = 0;
Kalle Valoaf762c02014-09-14 12:50:17 +03005105 u32 vdev_param, pdev_param, slottime, preamble;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005106
5107 mutex_lock(&ar->conf_mutex);
5108
5109 if (changed & BSS_CHANGED_IBSS)
5110 ath10k_control_ibss(arvif, info, vif->addr);
5111
5112 if (changed & BSS_CHANGED_BEACON_INT) {
5113 arvif->beacon_interval = info->beacon_int;
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02005114 vdev_param = ar->wmi.vdev_param->beacon_interval;
5115 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03005116 arvif->beacon_interval);
Michal Kazior7aa7a722014-08-25 12:09:38 +02005117 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03005118 "mac vdev %d beacon_interval %d\n",
5119 arvif->vdev_id, arvif->beacon_interval);
5120
Kalle Valo5e3dd152013-06-12 20:52:10 +03005121 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005122 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02005123 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005124 }
5125
5126 if (changed & BSS_CHANGED_BEACON) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02005127 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03005128 "vdev %d set beacon tx mode to staggered\n",
5129 arvif->vdev_id);
5130
Bartosz Markowski226a3392013-09-26 17:47:16 +02005131 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5132 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03005133 WMI_BEACON_STAGGERED_MODE);
5134 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005135 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02005136 arvif->vdev_id, ret);
Michal Kaziorfbb8f1b2015-01-13 16:30:12 +02005137
5138 ret = ath10k_mac_setup_bcn_tmpl(arvif);
5139 if (ret)
5140 ath10k_warn(ar, "failed to update beacon template: %d\n",
5141 ret);
Bob Copelandb6c7baf2015-09-09 12:47:36 -04005142
5143 if (ieee80211_vif_is_mesh(vif)) {
5144 /* mesh doesn't use SSID but firmware needs it */
5145 strncpy(arvif->u.ap.ssid, "mesh",
5146 sizeof(arvif->u.ap.ssid));
5147 arvif->u.ap.ssid_len = 4;
5148 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03005149 }
5150
Michal Kaziorfbb8f1b2015-01-13 16:30:12 +02005151 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5152 ret = ath10k_mac_setup_prb_tmpl(arvif);
5153 if (ret)
5154 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5155 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005156 }
5157
Michal Kaziorba2479f2015-01-24 12:14:51 +02005158 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03005159 arvif->dtim_period = info->dtim_period;
5160
Michal Kazior7aa7a722014-08-25 12:09:38 +02005161 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03005162 "mac vdev %d dtim_period %d\n",
5163 arvif->vdev_id, arvif->dtim_period);
5164
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02005165 vdev_param = ar->wmi.vdev_param->dtim_period;
5166 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03005167 arvif->dtim_period);
5168 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005169 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02005170 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005171 }
5172
5173 if (changed & BSS_CHANGED_SSID &&
5174 vif->type == NL80211_IFTYPE_AP) {
5175 arvif->u.ap.ssid_len = info->ssid_len;
5176 if (info->ssid_len)
5177 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5178 arvif->u.ap.hidden_ssid = info->hidden_ssid;
5179 }
5180
Michal Kazior077efc82014-10-21 10:10:29 +03005181 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5182 ether_addr_copy(arvif->bssid, info->bssid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005183
5184 if (changed & BSS_CHANGED_BEACON_ENABLED)
5185 ath10k_control_beaconing(arvif, info);
5186
5187 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02005188 arvif->use_cts_prot = info->use_cts_prot;
Michal Kazior7aa7a722014-08-25 12:09:38 +02005189 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02005190 arvif->vdev_id, info->use_cts_prot);
Kalle Valo60c3daa2013-09-08 17:56:07 +03005191
Marek Kwaczynskie81bd102014-03-11 12:58:00 +02005192 ret = ath10k_recalc_rtscts_prot(arvif);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005193 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005194 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02005195 arvif->vdev_id, ret);
Michal Kaziora87fd4b2015-03-02 11:21:17 +01005196
5197 vdev_param = ar->wmi.vdev_param->protection_mode;
5198 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5199 info->use_cts_prot ? 1 : 0);
5200 if (ret)
5201 ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
Kalle Valo617b0f42015-10-05 17:56:35 +03005202 info->use_cts_prot, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005203 }
5204
5205 if (changed & BSS_CHANGED_ERP_SLOT) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03005206 if (info->use_short_slot)
5207 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5209 else
5210 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5211
Michal Kazior7aa7a722014-08-25 12:09:38 +02005212 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03005213 arvif->vdev_id, slottime);
5214
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02005215 vdev_param = ar->wmi.vdev_param->slot_time;
5216 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03005217 slottime);
5218 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005219 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02005220 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005221 }
5222
5223 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03005224 if (info->use_short_preamble)
5225 preamble = WMI_VDEV_PREAMBLE_SHORT;
5226 else
5227 preamble = WMI_VDEV_PREAMBLE_LONG;
5228
Michal Kazior7aa7a722014-08-25 12:09:38 +02005229 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03005230			   "mac vdev %d preamble %d\n",
5231 arvif->vdev_id, preamble);
5232
Bartosz Markowski6d1506e2013-09-26 17:47:15 +02005233 vdev_param = ar->wmi.vdev_param->preamble;
5234 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
Kalle Valo5e3dd152013-06-12 20:52:10 +03005235 preamble);
5236 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005237 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02005238 arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005239 }
5240
5241 if (changed & BSS_CHANGED_ASSOC) {
Michal Kaziore556f112014-08-28 12:58:17 +02005242 if (info->assoc) {
5243 /* Workaround: Make sure monitor vdev is not running
5244 * when associating to prevent some firmware revisions
5245 * (e.g. 10.1 and 10.2) from crashing.
5246 */
5247 if (ar->monitor_started)
5248 ath10k_monitor_stop(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005249 ath10k_bss_assoc(hw, vif, info);
Michal Kaziore556f112014-08-28 12:58:17 +02005250 ath10k_monitor_recalc(ar);
Michal Kazior077efc82014-10-21 10:10:29 +03005251 } else {
5252 ath10k_bss_disassoc(hw, vif);
Michal Kaziore556f112014-08-28 12:58:17 +02005253 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03005254 }
5255
Michal Kazior7d9d5582014-10-21 10:40:15 +03005256 if (changed & BSS_CHANGED_TXPOWER) {
5257 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5258 arvif->vdev_id, info->txpower);
5259
5260 arvif->txpower = info->txpower;
5261 ret = ath10k_mac_txpower_recalc(ar);
5262 if (ret)
5263 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5264 }
5265
Michal Kaziorbf14e652014-12-12 12:41:38 +01005266 if (changed & BSS_CHANGED_PS) {
Michal Kaziorcffb41f2015-02-13 13:30:16 +01005267 arvif->ps = vif->bss_conf.ps;
5268
5269 ret = ath10k_config_ps(ar);
Michal Kaziorbf14e652014-12-12 12:41:38 +01005270 if (ret)
5271 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5272 arvif->vdev_id, ret);
5273 }
5274
Kalle Valo5e3dd152013-06-12 20:52:10 +03005275 mutex_unlock(&ar->conf_mutex);
5276}
5277
5278static int ath10k_hw_scan(struct ieee80211_hw *hw,
5279 struct ieee80211_vif *vif,
David Spinadelc56ef672014-02-05 15:21:13 +02005280 struct ieee80211_scan_request *hw_req)
Kalle Valo5e3dd152013-06-12 20:52:10 +03005281{
5282 struct ath10k *ar = hw->priv;
5283 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
David Spinadelc56ef672014-02-05 15:21:13 +02005284 struct cfg80211_scan_request *req = &hw_req->req;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005285 struct wmi_start_scan_arg arg;
5286 int ret = 0;
5287 int i;
5288
5289 mutex_lock(&ar->conf_mutex);
5290
5291 spin_lock_bh(&ar->data_lock);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02005292 switch (ar->scan.state) {
5293 case ATH10K_SCAN_IDLE:
5294 reinit_completion(&ar->scan.started);
5295 reinit_completion(&ar->scan.completed);
5296 ar->scan.state = ATH10K_SCAN_STARTING;
5297 ar->scan.is_roc = false;
5298 ar->scan.vdev_id = arvif->vdev_id;
5299 ret = 0;
5300 break;
5301 case ATH10K_SCAN_STARTING:
5302 case ATH10K_SCAN_RUNNING:
5303 case ATH10K_SCAN_ABORTING:
Kalle Valo5e3dd152013-06-12 20:52:10 +03005304 ret = -EBUSY;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02005305 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005306 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03005307 spin_unlock_bh(&ar->data_lock);
5308
Michal Kazior5c81c7f2014-08-05 14:54:44 +02005309 if (ret)
5310 goto exit;
5311
Kalle Valo5e3dd152013-06-12 20:52:10 +03005312 memset(&arg, 0, sizeof(arg));
5313 ath10k_wmi_start_scan_init(ar, &arg);
5314 arg.vdev_id = arvif->vdev_id;
5315 arg.scan_id = ATH10K_SCAN_ID;
5316
Kalle Valo5e3dd152013-06-12 20:52:10 +03005317 if (req->ie_len) {
5318 arg.ie_len = req->ie_len;
5319 memcpy(arg.ie, req->ie, arg.ie_len);
5320 }
5321
5322 if (req->n_ssids) {
5323 arg.n_ssids = req->n_ssids;
5324 for (i = 0; i < arg.n_ssids; i++) {
5325 arg.ssids[i].len = req->ssids[i].ssid_len;
5326 arg.ssids[i].ssid = req->ssids[i].ssid;
5327 }
Michal Kaziordcd4a562013-07-31 10:55:12 +02005328 } else {
5329 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005330 }
5331
5332 if (req->n_channels) {
5333 arg.n_channels = req->n_channels;
5334 for (i = 0; i < arg.n_channels; i++)
5335 arg.channels[i] = req->channels[i]->center_freq;
5336 }
5337
5338 ret = ath10k_start_scan(ar, &arg);
5339 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02005340 ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005341 spin_lock_bh(&ar->data_lock);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02005342 ar->scan.state = ATH10K_SCAN_IDLE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005343 spin_unlock_bh(&ar->data_lock);
5344 }
5345
Michal Kazior634349b2015-09-03 10:43:45 +02005346 /* Add a 200ms margin to account for event/command processing */
5347 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5348 msecs_to_jiffies(arg.max_scan_time +
5349 200));
5350
Kalle Valo5e3dd152013-06-12 20:52:10 +03005351exit:
5352 mutex_unlock(&ar->conf_mutex);
5353 return ret;
5354}
5355
5356static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5357 struct ieee80211_vif *vif)
5358{
5359 struct ath10k *ar = hw->priv;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005360
5361 mutex_lock(&ar->conf_mutex);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02005362 ath10k_scan_abort(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005363 mutex_unlock(&ar->conf_mutex);
Michal Kazior4eb2e162014-10-28 10:23:09 +01005364
5365 cancel_delayed_work_sync(&ar->scan.timeout);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005366}
5367
Michal Kaziorcfb27d22013-12-02 09:06:36 +01005368static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5369 struct ath10k_vif *arvif,
5370 enum set_key_cmd cmd,
5371 struct ieee80211_key_conf *key)
5372{
5373 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5374 int ret;
5375
5376 /* 10.1 firmware branch requires default key index to be set to group
5377 * key index after installing it. Otherwise FW/HW Txes corrupted
5378 * frames with multi-vif APs. This is not required for main firmware
5379 * branch (e.g. 636).
5380 *
Michal Kazior8461baf2015-04-10 13:23:22 +00005381 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5382 *
5383 * FIXME: It remains unknown if this is required for multi-vif STA
5384 * interfaces on 10.1.
5385 */
Michal Kaziorcfb27d22013-12-02 09:06:36 +01005386
Michal Kazior8461baf2015-04-10 13:23:22 +00005387 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5388 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
Michal Kaziorcfb27d22013-12-02 09:06:36 +01005389 return;
5390
5391 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5392 return;
5393
5394 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5395 return;
5396
5397 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5398 return;
5399
5400 if (cmd != SET_KEY)
5401 return;
5402
5403 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5404 key->keyidx);
5405 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005406 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02005407 arvif->vdev_id, ret);
Michal Kaziorcfb27d22013-12-02 09:06:36 +01005408}
5409
Kalle Valo5e3dd152013-06-12 20:52:10 +03005410static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5411 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5412 struct ieee80211_key_conf *key)
5413{
5414 struct ath10k *ar = hw->priv;
5415 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5416 struct ath10k_peer *peer;
5417 const u8 *peer_addr;
5418 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5419 key->cipher == WLAN_CIPHER_SUITE_WEP104;
5420 int ret = 0;
Michal Kazior29a10002015-04-10 13:05:58 +00005421 int ret2;
Michal Kazior370e5672015-02-18 14:02:26 +01005422 u32 flags = 0;
Michal Kazior29a10002015-04-10 13:05:58 +00005423 u32 flags2;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005424
Bartosz Markowskid7131c02015-03-10 14:32:19 +01005425 /* this one needs to be done in software */
5426 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5427 return 1;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005428
David Liuccec9032015-07-24 20:25:32 +03005429 if (arvif->nohwcrypt)
5430 return 1;
5431
Kalle Valo5e3dd152013-06-12 20:52:10 +03005432 if (key->keyidx > WMI_MAX_KEY_INDEX)
5433 return -ENOSPC;
5434
5435 mutex_lock(&ar->conf_mutex);
5436
5437 if (sta)
5438 peer_addr = sta->addr;
5439 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5440 peer_addr = vif->bss_conf.bssid;
5441 else
5442 peer_addr = vif->addr;
5443
5444 key->hw_key_idx = key->keyidx;
5445
Michal Kazior7c8cc7e2015-04-01 22:53:19 +03005446 if (is_wep) {
5447 if (cmd == SET_KEY)
5448 arvif->wep_keys[key->keyidx] = key;
5449 else
5450 arvif->wep_keys[key->keyidx] = NULL;
5451 }
5452
Kalle Valo5e3dd152013-06-12 20:52:10 +03005453	/* The peer should not disappear midway (unless FW goes awry) since
5454	 * we already hold conf_mutex. We just make sure it's there now. */
5455 spin_lock_bh(&ar->data_lock);
5456 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5457 spin_unlock_bh(&ar->data_lock);
5458
5459 if (!peer) {
5460 if (cmd == SET_KEY) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02005461 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03005462 peer_addr);
5463 ret = -EOPNOTSUPP;
5464 goto exit;
5465 } else {
5466 /* if the peer doesn't exist there is no key to disable
5467 * anymore */
5468 goto exit;
5469 }
5470 }
5471
Michal Kazior7cc45732015-03-09 14:24:17 +01005472 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5473 flags |= WMI_KEY_PAIRWISE;
5474 else
5475 flags |= WMI_KEY_GROUP;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005476
Kalle Valo5e3dd152013-06-12 20:52:10 +03005477 if (is_wep) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03005478 if (cmd == DISABLE_KEY)
5479 ath10k_clear_vdev_key(arvif, key);
Michal Kazior370e5672015-02-18 14:02:26 +01005480
Michal Kaziorad325cb2015-02-18 14:02:27 +01005481 /* When WEP keys are uploaded it's possible that there are
5482 * stations associated already (e.g. when merging) without any
5483 * keys. Static WEP needs an explicit per-peer key upload.
5484 */
5485 if (vif->type == NL80211_IFTYPE_ADHOC &&
5486 cmd == SET_KEY)
5487 ath10k_mac_vif_update_wep_key(arvif, key);
5488
Michal Kazior370e5672015-02-18 14:02:26 +01005489 /* 802.1x never sets the def_wep_key_idx so each set_key()
5490 * call changes default tx key.
5491 *
5492 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5493 * after first set_key().
5494 */
5495 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5496 flags |= WMI_KEY_TX_USAGE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005497 }
5498
Michal Kazior370e5672015-02-18 14:02:26 +01005499 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005500 if (ret) {
David Liuccec9032015-07-24 20:25:32 +03005501 WARN_ON(ret > 0);
Michal Kazior7aa7a722014-08-25 12:09:38 +02005502 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
Ben Greear69244e52014-02-27 18:50:00 +02005503 arvif->vdev_id, peer_addr, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005504 goto exit;
5505 }
5506
Michal Kazior29a10002015-04-10 13:05:58 +00005507 /* mac80211 sets static WEP keys as groupwise while firmware requires
5508 * them to be installed twice as both pairwise and groupwise.
5509 */
5510 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5511 flags2 = flags;
5512 flags2 &= ~WMI_KEY_GROUP;
5513 flags2 |= WMI_KEY_PAIRWISE;
5514
5515 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5516 if (ret) {
David Liuccec9032015-07-24 20:25:32 +03005517 WARN_ON(ret > 0);
Michal Kazior29a10002015-04-10 13:05:58 +00005518 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5519 arvif->vdev_id, peer_addr, ret);
5520 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5521 peer_addr, flags);
David Liuccec9032015-07-24 20:25:32 +03005522 if (ret2) {
5523 WARN_ON(ret2 > 0);
Michal Kazior29a10002015-04-10 13:05:58 +00005524 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5525 arvif->vdev_id, peer_addr, ret2);
David Liuccec9032015-07-24 20:25:32 +03005526 }
Michal Kazior29a10002015-04-10 13:05:58 +00005527 goto exit;
5528 }
5529 }
5530
Michal Kaziorcfb27d22013-12-02 09:06:36 +01005531 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5532
Kalle Valo5e3dd152013-06-12 20:52:10 +03005533 spin_lock_bh(&ar->data_lock);
5534 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5535 if (peer && cmd == SET_KEY)
5536 peer->keys[key->keyidx] = key;
5537 else if (peer && cmd == DISABLE_KEY)
5538 peer->keys[key->keyidx] = NULL;
5539 else if (peer == NULL)
5540 /* impossible unless FW goes crazy */
Michal Kazior7aa7a722014-08-25 12:09:38 +02005541 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005542 spin_unlock_bh(&ar->data_lock);
5543
5544exit:
5545 mutex_unlock(&ar->conf_mutex);
5546 return ret;
5547}
5548
SenthilKumar Jegadeesan627613f2015-01-29 13:50:38 +02005549static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5550 struct ieee80211_vif *vif,
5551 int keyidx)
5552{
5553 struct ath10k *ar = hw->priv;
5554 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5555 int ret;
5556
5557 mutex_lock(&arvif->ar->conf_mutex);
5558
5559 if (arvif->ar->state != ATH10K_STATE_ON)
5560 goto unlock;
5561
5562 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5563 arvif->vdev_id, keyidx);
5564
5565 ret = ath10k_wmi_vdev_set_param(arvif->ar,
5566 arvif->vdev_id,
5567 arvif->ar->wmi.vdev_param->def_keyid,
5568 keyidx);
5569
5570 if (ret) {
5571 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5572 arvif->vdev_id,
5573 ret);
5574 goto unlock;
5575 }
5576
5577 arvif->def_wep_key_idx = keyidx;
Michal Kazior370e5672015-02-18 14:02:26 +01005578
SenthilKumar Jegadeesan627613f2015-01-29 13:50:38 +02005579unlock:
5580 mutex_unlock(&arvif->ar->conf_mutex);
5581}
5582
Michal Kazior9797feb2014-02-14 14:49:48 +01005583static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5584{
5585 struct ath10k *ar;
5586 struct ath10k_vif *arvif;
5587 struct ath10k_sta *arsta;
5588 struct ieee80211_sta *sta;
Michal Kazior45c9abc2015-04-21 20:42:58 +03005589 struct cfg80211_chan_def def;
5590 enum ieee80211_band band;
5591 const u8 *ht_mcs_mask;
5592 const u16 *vht_mcs_mask;
Michal Kazior9797feb2014-02-14 14:49:48 +01005593 u32 changed, bw, nss, smps;
5594 int err;
5595
5596 arsta = container_of(wk, struct ath10k_sta, update_wk);
5597 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5598 arvif = arsta->arvif;
5599 ar = arvif->ar;
5600
Michal Kazior45c9abc2015-04-21 20:42:58 +03005601 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5602 return;
5603
5604 band = def.chan->band;
5605 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5606 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5607
Michal Kazior9797feb2014-02-14 14:49:48 +01005608 spin_lock_bh(&ar->data_lock);
5609
5610 changed = arsta->changed;
5611 arsta->changed = 0;
5612
5613 bw = arsta->bw;
5614 nss = arsta->nss;
5615 smps = arsta->smps;
5616
5617 spin_unlock_bh(&ar->data_lock);
5618
5619 mutex_lock(&ar->conf_mutex);
5620
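	/* Clamp the peer NSS to at least 1 and to no more than what the
	 * configured HT/VHT MCS rate masks allow.
	 */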
Michal Kazior45c9abc2015-04-21 20:42:58 +03005621 nss = max_t(u32, 1, nss);
5622 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5623 ath10k_mac_max_vht_nss(vht_mcs_mask)));
5624
Michal Kazior9797feb2014-02-14 14:49:48 +01005625 if (changed & IEEE80211_RC_BW_CHANGED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02005626 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01005627 sta->addr, bw);
5628
5629 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5630 WMI_PEER_CHAN_WIDTH, bw);
5631 if (err)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005632 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01005633 sta->addr, bw, err);
5634 }
5635
5636 if (changed & IEEE80211_RC_NSS_CHANGED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02005637 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01005638 sta->addr, nss);
5639
5640 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5641 WMI_PEER_NSS, nss);
5642 if (err)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005643 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01005644 sta->addr, nss, err);
5645 }
5646
5647 if (changed & IEEE80211_RC_SMPS_CHANGED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02005648 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01005649 sta->addr, smps);
5650
5651 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5652 WMI_PEER_SMPS_STATE, smps);
5653 if (err)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005654 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
Michal Kazior9797feb2014-02-14 14:49:48 +01005655 sta->addr, smps, err);
5656 }
5657
Janusz Dziedzic55884c02014-12-17 12:30:02 +02005658 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
5659 changed & IEEE80211_RC_NSS_CHANGED) {
5660 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02005661 sta->addr);
5662
Michal Kazior590922a2014-10-21 10:10:29 +03005663 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02005664 if (err)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005665 ath10k_warn(ar, "failed to reassociate station: %pM\n",
Chun-Yeow Yeoh44d6fa92014-03-07 10:19:30 +02005666 sta->addr);
5667 }
5668
Michal Kazior9797feb2014-02-14 14:49:48 +01005669 mutex_unlock(&ar->conf_mutex);
5670}
5671
Marek Puzyniak7c354242015-03-30 09:51:52 +03005672static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5673 struct ieee80211_sta *sta)
Michal Kaziorcfd10612014-11-25 15:16:05 +01005674{
5675 struct ath10k *ar = arvif->ar;
5676
5677 lockdep_assert_held(&ar->conf_mutex);
5678
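	/* peers created on a client (STA) vdev for anything other than a TDLS
	 * peer (i.e. the AP peer) do not count against the station limit
	 */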
Marek Puzyniak7c354242015-03-30 09:51:52 +03005679 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
Michal Kaziorcfd10612014-11-25 15:16:05 +01005680 return 0;
5681
5682 if (ar->num_stations >= ar->max_num_stations)
5683 return -ENOBUFS;
5684
5685 ar->num_stations++;
5686
5687 return 0;
5688}
5689
Marek Puzyniak7c354242015-03-30 09:51:52 +03005690static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
5691 struct ieee80211_sta *sta)
Michal Kaziorcfd10612014-11-25 15:16:05 +01005692{
5693 struct ath10k *ar = arvif->ar;
5694
5695 lockdep_assert_held(&ar->conf_mutex);
5696
Marek Puzyniak7c354242015-03-30 09:51:52 +03005697 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
Michal Kaziorcfd10612014-11-25 15:16:05 +01005698 return;
5699
5700 ar->num_stations--;
5701}
5702
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005703struct ath10k_mac_tdls_iter_data {
5704 u32 num_tdls_stations;
5705 struct ieee80211_vif *curr_vif;
5706};
5707
5708static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5709 struct ieee80211_sta *sta)
5710{
5711 struct ath10k_mac_tdls_iter_data *iter_data = data;
5712 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5713 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5714
5715 if (sta->tdls && sta_vif == iter_data->curr_vif)
5716 iter_data->num_tdls_stations++;
5717}
5718
5719static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5720 struct ieee80211_vif *vif)
5721{
5722 struct ath10k_mac_tdls_iter_data data = {};
5723
5724 data.curr_vif = vif;
5725
5726 ieee80211_iterate_stations_atomic(hw,
5727 ath10k_mac_tdls_vif_stations_count_iter,
5728 &data);
5729 return data.num_tdls_stations;
5730}
5731
5732static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5733 struct ieee80211_vif *vif)
5734{
5735 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5736 int *num_tdls_vifs = data;
5737
5738 if (vif->type != NL80211_IFTYPE_STATION)
5739 return;
5740
5741 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5742 (*num_tdls_vifs)++;
5743}
5744
5745static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5746{
5747 int num_tdls_vifs = 0;
5748
5749 ieee80211_iterate_active_interfaces_atomic(hw,
5750 IEEE80211_IFACE_ITER_NORMAL,
5751 ath10k_mac_tdls_vifs_count_iter,
5752 &num_tdls_vifs);
5753 return num_tdls_vifs;
5754}
5755
Kalle Valo5e3dd152013-06-12 20:52:10 +03005756static int ath10k_sta_state(struct ieee80211_hw *hw,
5757 struct ieee80211_vif *vif,
5758 struct ieee80211_sta *sta,
5759 enum ieee80211_sta_state old_state,
5760 enum ieee80211_sta_state new_state)
5761{
5762 struct ath10k *ar = hw->priv;
5763 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior9797feb2014-02-14 14:49:48 +01005764 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
Michal Kaziorbb8f0c62016-03-06 16:14:27 +02005765 struct ath10k_peer *peer;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005766 int ret = 0;
Michal Kazior69427262016-03-06 16:14:30 +02005767 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005768
Michal Kazior76f90022014-02-25 09:29:57 +02005769 if (old_state == IEEE80211_STA_NOTEXIST &&
5770 new_state == IEEE80211_STA_NONE) {
5771 memset(arsta, 0, sizeof(*arsta));
5772 arsta->arvif = arvif;
5773 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
Michal Kazior29946872016-03-06 16:14:34 +02005774
5775 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5776 ath10k_mac_txq_init(sta->txq[i]);
Michal Kazior76f90022014-02-25 09:29:57 +02005777 }
5778
Michal Kazior9797feb2014-02-14 14:49:48 +01005779 /* cancel must be done outside the mutex to avoid deadlock */
5780 if ((old_state == IEEE80211_STA_NONE &&
5781 new_state == IEEE80211_STA_NOTEXIST))
5782 cancel_work_sync(&arsta->update_wk);
5783
Kalle Valo5e3dd152013-06-12 20:52:10 +03005784 mutex_lock(&ar->conf_mutex);
5785
5786 if (old_state == IEEE80211_STA_NOTEXIST &&
Michal Kazior077efc82014-10-21 10:10:29 +03005787 new_state == IEEE80211_STA_NONE) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03005788 /*
5789 * New station addition.
5790 */
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005791 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
5792 u32 num_tdls_stations;
5793 u32 num_tdls_vifs;
5794
Michal Kaziorcfd10612014-11-25 15:16:05 +01005795 ath10k_dbg(ar, ATH10K_DBG_MAC,
5796 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
5797 arvif->vdev_id, sta->addr,
5798 ar->num_stations + 1, ar->max_num_stations,
5799 ar->num_peers + 1, ar->max_num_peers);
Bartosz Markowski0e759f32014-01-02 14:38:33 +01005800
Marek Puzyniak7c354242015-03-30 09:51:52 +03005801 ret = ath10k_mac_inc_num_stations(arvif, sta);
Michal Kaziorcfd10612014-11-25 15:16:05 +01005802 if (ret) {
5803 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
5804 ar->max_num_stations);
Bartosz Markowski0e759f32014-01-02 14:38:33 +01005805 goto exit;
5806 }
5807
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005808 if (sta->tdls)
5809 peer_type = WMI_PEER_TYPE_TDLS;
5810
Michal Kazior69427262016-03-06 16:14:30 +02005811 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5812 sta->addr, peer_type);
Michal Kaziora52c0282014-11-25 15:16:03 +01005813 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02005814 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
Ben Greear479398b2013-11-04 09:19:34 -08005815 sta->addr, arvif->vdev_id, ret);
Marek Puzyniak7c354242015-03-30 09:51:52 +03005816 ath10k_mac_dec_num_stations(arvif, sta);
Michal Kaziora52c0282014-11-25 15:16:03 +01005817 goto exit;
5818 }
Michal Kazior077efc82014-10-21 10:10:29 +03005819
Michal Kaziorbb8f0c62016-03-06 16:14:27 +02005820 spin_lock_bh(&ar->data_lock);
5821
5822 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5823 if (!peer) {
5824 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5825 vif->addr, arvif->vdev_id);
5826 spin_unlock_bh(&ar->data_lock);
5827 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5828 ath10k_mac_dec_num_stations(arvif, sta);
5829 ret = -ENOENT;
5830 goto exit;
5831 }
5832
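		/* cache the first peer id the firmware mapped for this station */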
5833 arsta->peer_id = find_first_bit(peer->peer_ids,
5834 ATH10K_MAX_NUM_PEER_IDS);
5835
5836 spin_unlock_bh(&ar->data_lock);
5837
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005838 if (!sta->tdls)
5839 goto exit;
Michal Kazior077efc82014-10-21 10:10:29 +03005840
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005841 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
5842 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
5843
5844 if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
5845 num_tdls_stations == 0) {
5846 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
5847 arvif->vdev_id, ar->max_num_tdls_vdevs);
5848 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5849 ath10k_mac_dec_num_stations(arvif, sta);
5850 ret = -ENOBUFS;
5851 goto exit;
5852 }
5853
5854 if (num_tdls_stations == 0) {
 5855			/* This is the first TDLS peer in the current vif */
5856 enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
5857
5858 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5859 state);
Michal Kazior077efc82014-10-21 10:10:29 +03005860 if (ret) {
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005861 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
Michal Kazior077efc82014-10-21 10:10:29 +03005862 arvif->vdev_id, ret);
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005863 ath10k_peer_delete(ar, arvif->vdev_id,
5864 sta->addr);
5865 ath10k_mac_dec_num_stations(arvif, sta);
Michal Kazior077efc82014-10-21 10:10:29 +03005866 goto exit;
5867 }
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005868 }
Michal Kazior077efc82014-10-21 10:10:29 +03005869
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005870 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
5871 WMI_TDLS_PEER_STATE_PEERING);
5872 if (ret) {
5873 ath10k_warn(ar,
5874 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
5875 sta->addr, arvif->vdev_id, ret);
5876 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5877 ath10k_mac_dec_num_stations(arvif, sta);
5878
5879 if (num_tdls_stations != 0)
5880 goto exit;
5881 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5882 WMI_TDLS_DISABLE);
Michal Kazior077efc82014-10-21 10:10:29 +03005883 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03005884 } else if ((old_state == IEEE80211_STA_NONE &&
5885 new_state == IEEE80211_STA_NOTEXIST)) {
5886 /*
5887 * Existing station deletion.
5888 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02005889 ath10k_dbg(ar, ATH10K_DBG_MAC,
Kalle Valo60c3daa2013-09-08 17:56:07 +03005890 "mac vdev %d peer delete %pM (sta gone)\n",
5891 arvif->vdev_id, sta->addr);
Michal Kazior077efc82014-10-21 10:10:29 +03005892
Kalle Valo5e3dd152013-06-12 20:52:10 +03005893 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5894 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005895 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02005896 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005897
Marek Puzyniak7c354242015-03-30 09:51:52 +03005898 ath10k_mac_dec_num_stations(arvif, sta);
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005899
Michal Kazior69427262016-03-06 16:14:30 +02005900 spin_lock_bh(&ar->data_lock);
5901 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5902 peer = ar->peer_map[i];
5903 if (!peer)
5904 continue;
5905
5906 if (peer->sta == sta) {
5907 ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
5908 sta->addr, arvif->vdev_id);
5909 peer->sta = NULL;
5910 }
5911 }
5912 spin_unlock_bh(&ar->data_lock);
5913
Michal Kazior29946872016-03-06 16:14:34 +02005914 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5915 ath10k_mac_txq_unref(ar, sta->txq[i]);
5916
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005917 if (!sta->tdls)
5918 goto exit;
5919
5920 if (ath10k_mac_tdls_vif_stations_count(hw, vif))
5921 goto exit;
5922
 5923		/* This was the last TDLS peer in the current vif */
5924 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5925 WMI_TDLS_DISABLE);
5926 if (ret) {
5927 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
5928 arvif->vdev_id, ret);
5929 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03005930 } else if (old_state == IEEE80211_STA_AUTH &&
5931 new_state == IEEE80211_STA_ASSOC &&
5932 (vif->type == NL80211_IFTYPE_AP ||
Bob Copelandb6c7baf2015-09-09 12:47:36 -04005933 vif->type == NL80211_IFTYPE_MESH_POINT ||
Kalle Valo5e3dd152013-06-12 20:52:10 +03005934 vif->type == NL80211_IFTYPE_ADHOC)) {
5935 /*
5936 * New association.
5937 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02005938 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03005939 sta->addr);
5940
Michal Kazior590922a2014-10-21 10:10:29 +03005941 ret = ath10k_station_assoc(ar, vif, sta, false);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005942 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005943 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02005944 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005945 } else if (old_state == IEEE80211_STA_ASSOC &&
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005946 new_state == IEEE80211_STA_AUTHORIZED &&
5947 sta->tdls) {
5948 /*
 5949		 * TDLS station authorized.
5950 */
5951 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
5952 sta->addr);
5953
5954 ret = ath10k_station_assoc(ar, vif, sta, false);
5955 if (ret) {
5956 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
5957 sta->addr, arvif->vdev_id, ret);
5958 goto exit;
5959 }
5960
5961 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
5962 WMI_TDLS_PEER_STATE_CONNECTED);
5963 if (ret)
5964 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
5965 sta->addr, arvif->vdev_id, ret);
5966 } else if (old_state == IEEE80211_STA_ASSOC &&
5967 new_state == IEEE80211_STA_AUTH &&
5968 (vif->type == NL80211_IFTYPE_AP ||
Bob Copelandb6c7baf2015-09-09 12:47:36 -04005969 vif->type == NL80211_IFTYPE_MESH_POINT ||
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03005970 vif->type == NL80211_IFTYPE_ADHOC)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03005971 /*
5972 * Disassociation.
5973 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02005974 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
Kalle Valo60c3daa2013-09-08 17:56:07 +03005975 sta->addr);
5976
Michal Kazior590922a2014-10-21 10:10:29 +03005977 ret = ath10k_station_disassoc(ar, vif, sta);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005978 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02005979 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
Ben Greear69244e52014-02-27 18:50:00 +02005980 sta->addr, arvif->vdev_id, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03005981 }
Bartosz Markowski0e759f32014-01-02 14:38:33 +01005982exit:
Kalle Valo5e3dd152013-06-12 20:52:10 +03005983 mutex_unlock(&ar->conf_mutex);
5984 return ret;
5985}
5986
5987static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
Kalle Valo5b07e072014-09-14 12:50:06 +03005988 u16 ac, bool enable)
Kalle Valo5e3dd152013-06-12 20:52:10 +03005989{
5990 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kaziorb0e56152015-01-24 12:14:52 +02005991 struct wmi_sta_uapsd_auto_trig_arg arg = {};
5992 u32 prio = 0, acc = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03005993 u32 value = 0;
5994 int ret = 0;
5995
Michal Kazior548db542013-07-05 16:15:15 +03005996 lockdep_assert_held(&ar->conf_mutex);
5997
Kalle Valo5e3dd152013-06-12 20:52:10 +03005998 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
5999 return 0;
6000
6001 switch (ac) {
6002 case IEEE80211_AC_VO:
6003 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6004 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
Michal Kaziorb0e56152015-01-24 12:14:52 +02006005 prio = 7;
6006 acc = 3;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006007 break;
6008 case IEEE80211_AC_VI:
6009 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6010 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
Michal Kaziorb0e56152015-01-24 12:14:52 +02006011 prio = 5;
6012 acc = 2;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006013 break;
6014 case IEEE80211_AC_BE:
6015 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6016 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
Michal Kaziorb0e56152015-01-24 12:14:52 +02006017 prio = 2;
6018 acc = 1;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006019 break;
6020 case IEEE80211_AC_BK:
6021 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6022 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
Michal Kaziorb0e56152015-01-24 12:14:52 +02006023 prio = 0;
6024 acc = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006025 break;
6026 }
6027
6028 if (enable)
6029 arvif->u.sta.uapsd |= value;
6030 else
6031 arvif->u.sta.uapsd &= ~value;
6032
6033 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6034 WMI_STA_PS_PARAM_UAPSD,
6035 arvif->u.sta.uapsd);
6036 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02006037 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03006038 goto exit;
6039 }
6040
6041 if (arvif->u.sta.uapsd)
6042 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6043 else
6044 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6045
6046 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6047 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6048 value);
6049 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02006050 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03006051
Michal Kazior9f9b5742014-12-12 12:41:36 +01006052 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6053 if (ret) {
6054 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6055 arvif->vdev_id, ret);
6056 return ret;
6057 }
6058
6059 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6060 if (ret) {
6061 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6062 arvif->vdev_id, ret);
6063 return ret;
6064 }
6065
Michal Kaziorb0e56152015-01-24 12:14:52 +02006066 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6067 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
 6068		/* Only userspace can make an educated decision on when to send a
 6069		 * trigger frame. The following effectively disables the UAPSD
 6070		 * autotrigger in firmware (which is enabled by default provided
 6071		 * the autotrigger service is available).
6072 */
6073
6074 arg.wmm_ac = acc;
6075 arg.user_priority = prio;
6076 arg.service_interval = 0;
6077 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6078 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6079
6080 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6081 arvif->bssid, &arg, 1);
6082 if (ret) {
6083 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6084 ret);
6085 return ret;
6086 }
6087 }
6088
Kalle Valo5e3dd152013-06-12 20:52:10 +03006089exit:
6090 return ret;
6091}
6092
6093static int ath10k_conf_tx(struct ieee80211_hw *hw,
6094 struct ieee80211_vif *vif, u16 ac,
6095 const struct ieee80211_tx_queue_params *params)
6096{
6097 struct ath10k *ar = hw->priv;
Michal Kazior5e752e42015-01-19 09:53:41 +01006098 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Kalle Valo5e3dd152013-06-12 20:52:10 +03006099 struct wmi_wmm_params_arg *p = NULL;
6100 int ret;
6101
6102 mutex_lock(&ar->conf_mutex);
6103
6104 switch (ac) {
6105 case IEEE80211_AC_VO:
Michal Kazior5e752e42015-01-19 09:53:41 +01006106 p = &arvif->wmm_params.ac_vo;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006107 break;
6108 case IEEE80211_AC_VI:
Michal Kazior5e752e42015-01-19 09:53:41 +01006109 p = &arvif->wmm_params.ac_vi;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006110 break;
6111 case IEEE80211_AC_BE:
Michal Kazior5e752e42015-01-19 09:53:41 +01006112 p = &arvif->wmm_params.ac_be;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006113 break;
6114 case IEEE80211_AC_BK:
Michal Kazior5e752e42015-01-19 09:53:41 +01006115 p = &arvif->wmm_params.ac_bk;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006116 break;
6117 }
6118
6119 if (WARN_ON(!p)) {
6120 ret = -EINVAL;
6121 goto exit;
6122 }
6123
6124 p->cwmin = params->cw_min;
6125 p->cwmax = params->cw_max;
6126 p->aifs = params->aifs;
6127
6128 /*
6129 * The channel time duration programmed in the HW is in absolute
6130 * microseconds, while mac80211 gives the txop in units of
6131 * 32 microseconds.
6132 */
6133 p->txop = params->txop * 32;
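	/* e.g. a mac80211 txop of 94 units becomes 94 * 32 = 3008 us here */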
6134
Michal Kazior7fc979a2015-01-28 09:57:28 +02006135 if (ar->wmi.ops->gen_vdev_wmm_conf) {
6136 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6137 &arvif->wmm_params);
6138 if (ret) {
6139 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6140 arvif->vdev_id, ret);
6141 goto exit;
6142 }
6143 } else {
6144 /* This won't work well with multi-interface cases but it's
6145 * better than nothing.
6146 */
6147 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6148 if (ret) {
6149 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6150 goto exit;
6151 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03006152 }
6153
6154 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6155 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02006156 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03006157
6158exit:
6159 mutex_unlock(&ar->conf_mutex);
6160 return ret;
6161}
6162
6163#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
6164
6165static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6166 struct ieee80211_vif *vif,
6167 struct ieee80211_channel *chan,
6168 int duration,
6169 enum ieee80211_roc_type type)
6170{
6171 struct ath10k *ar = hw->priv;
6172 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6173 struct wmi_start_scan_arg arg;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006174 int ret = 0;
Michal Kaziorfcf98442015-03-31 11:03:47 +00006175 u32 scan_time_msec;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006176
6177 mutex_lock(&ar->conf_mutex);
6178
6179 spin_lock_bh(&ar->data_lock);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006180 switch (ar->scan.state) {
6181 case ATH10K_SCAN_IDLE:
6182 reinit_completion(&ar->scan.started);
6183 reinit_completion(&ar->scan.completed);
6184 reinit_completion(&ar->scan.on_channel);
6185 ar->scan.state = ATH10K_SCAN_STARTING;
6186 ar->scan.is_roc = true;
6187 ar->scan.vdev_id = arvif->vdev_id;
6188 ar->scan.roc_freq = chan->center_freq;
Michal Kaziord710e752015-07-09 13:08:36 +02006189 ar->scan.roc_notify = true;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006190 ret = 0;
6191 break;
6192 case ATH10K_SCAN_STARTING:
6193 case ATH10K_SCAN_RUNNING:
6194 case ATH10K_SCAN_ABORTING:
Kalle Valo5e3dd152013-06-12 20:52:10 +03006195 ret = -EBUSY;
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006196 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006197 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03006198 spin_unlock_bh(&ar->data_lock);
6199
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006200 if (ret)
6201 goto exit;
6202
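	/* dwell long enough to cover any allowed off-channel duration; the
	 * actual end of the RoC period is enforced by the scan timeout work
	 * scheduled further below
	 */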
Michal Kaziorfcf98442015-03-31 11:03:47 +00006203 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
Michal Kaziordcca0bd2014-11-24 14:58:32 +01006204
Kalle Valo5e3dd152013-06-12 20:52:10 +03006205 memset(&arg, 0, sizeof(arg));
6206 ath10k_wmi_start_scan_init(ar, &arg);
6207 arg.vdev_id = arvif->vdev_id;
6208 arg.scan_id = ATH10K_SCAN_ID;
6209 arg.n_channels = 1;
6210 arg.channels[0] = chan->center_freq;
Michal Kaziorfcf98442015-03-31 11:03:47 +00006211 arg.dwell_time_active = scan_time_msec;
6212 arg.dwell_time_passive = scan_time_msec;
6213 arg.max_scan_time = scan_time_msec;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006214 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6215 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
Michal Kaziordbd3f9f2015-03-31 11:03:48 +00006216 arg.burst_duration_ms = duration;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006217
6218 ret = ath10k_start_scan(ar, &arg);
6219 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02006220 ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03006221 spin_lock_bh(&ar->data_lock);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006222 ar->scan.state = ATH10K_SCAN_IDLE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006223 spin_unlock_bh(&ar->data_lock);
6224 goto exit;
6225 }
6226
6227 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
6228 if (ret == 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02006229 ath10k_warn(ar, "failed to switch to channel for roc scan\n");
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006230
6231 ret = ath10k_scan_stop(ar);
6232 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02006233 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006234
Kalle Valo5e3dd152013-06-12 20:52:10 +03006235 ret = -ETIMEDOUT;
6236 goto exit;
6237 }
6238
Michal Kaziorfcf98442015-03-31 11:03:47 +00006239 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6240 msecs_to_jiffies(duration));
6241
Kalle Valo5e3dd152013-06-12 20:52:10 +03006242 ret = 0;
6243exit:
6244 mutex_unlock(&ar->conf_mutex);
6245 return ret;
6246}
6247
6248static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6249{
6250 struct ath10k *ar = hw->priv;
6251
6252 mutex_lock(&ar->conf_mutex);
Michal Kaziord710e752015-07-09 13:08:36 +02006253
6254 spin_lock_bh(&ar->data_lock);
6255 ar->scan.roc_notify = false;
6256 spin_unlock_bh(&ar->data_lock);
6257
Michal Kazior5c81c7f2014-08-05 14:54:44 +02006258 ath10k_scan_abort(ar);
Michal Kaziord710e752015-07-09 13:08:36 +02006259
Kalle Valo5e3dd152013-06-12 20:52:10 +03006260 mutex_unlock(&ar->conf_mutex);
6261
Michal Kazior4eb2e162014-10-28 10:23:09 +01006262 cancel_delayed_work_sync(&ar->scan.timeout);
6263
Kalle Valo5e3dd152013-06-12 20:52:10 +03006264 return 0;
6265}
6266
6267/*
6268 * Both RTS and Fragmentation threshold are interface-specific
6269 * in ath10k, but device-specific in mac80211.
6270 */
Kalle Valo5e3dd152013-06-12 20:52:10 +03006271
6272static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6273{
Kalle Valo5e3dd152013-06-12 20:52:10 +03006274 struct ath10k *ar = hw->priv;
Michal Kaziorad088bf2013-10-16 15:44:46 +03006275 struct ath10k_vif *arvif;
6276 int ret = 0;
Michal Kazior548db542013-07-05 16:15:15 +03006277
Michal Kaziorad088bf2013-10-16 15:44:46 +03006278 mutex_lock(&ar->conf_mutex);
6279 list_for_each_entry(arvif, &ar->arvifs, list) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02006280 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
Michal Kaziorad088bf2013-10-16 15:44:46 +03006281 arvif->vdev_id, value);
Kalle Valo60c3daa2013-09-08 17:56:07 +03006282
Michal Kaziorad088bf2013-10-16 15:44:46 +03006283 ret = ath10k_mac_set_rts(arvif, value);
6284 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02006285 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
Michal Kaziorad088bf2013-10-16 15:44:46 +03006286 arvif->vdev_id, ret);
6287 break;
6288 }
6289 }
6290 mutex_unlock(&ar->conf_mutex);
6291
6292 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006293}
6294
Michal Kazior92092fe2015-08-03 11:16:43 +02006295static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6296{
 6297	/* Even though there's a WMI enum for the fragmentation threshold, no
 6298	 * known firmware actually implements it. Moreover, it is not possible
 6299	 * to leave frame fragmentation to mac80211 because the firmware clears
 6300	 * the "more fragments" bit in frame control, making it impossible for
 6301	 * remote devices to reassemble frames.
6302 *
6303 * Hence implement a dummy callback just to say fragmentation isn't
6304 * supported. This effectively prevents mac80211 from doing frame
6305 * fragmentation in software.
6306 */
6307 return -EOPNOTSUPP;
6308}
6309
Emmanuel Grumbach77be2c52014-03-27 11:30:29 +02006310static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6311 u32 queues, bool drop)
Kalle Valo5e3dd152013-06-12 20:52:10 +03006312{
6313 struct ath10k *ar = hw->priv;
Michal Kazioraffd3212013-07-16 09:54:35 +02006314 bool skip;
Nicholas Mc Guired4298a32015-06-15 14:46:43 +03006315 long time_left;
Kalle Valo5e3dd152013-06-12 20:52:10 +03006316
 6317	/* mac80211 doesn't care if we really xmit queued frames or not;
 6318	 * we'll collect those frames either way if we stop/delete vdevs */
6319 if (drop)
6320 return;
6321
Michal Kazior548db542013-07-05 16:15:15 +03006322 mutex_lock(&ar->conf_mutex);
6323
Michal Kazioraffd3212013-07-16 09:54:35 +02006324 if (ar->state == ATH10K_STATE_WEDGED)
6325 goto skip;
6326
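	/* the wait condition below is a statement expression: it samples the
	 * pending tx count under the htt tx lock and also latches into 'skip'
	 * whether the device has wedged or crashed in the meantime
	 */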
Nicholas Mc Guired4298a32015-06-15 14:46:43 +03006327 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
Kalle Valo5e3dd152013-06-12 20:52:10 +03006328 bool empty;
Michal Kazioraffd3212013-07-16 09:54:35 +02006329
Michal Kazioredb82362013-07-05 16:15:14 +03006330 spin_lock_bh(&ar->htt.tx_lock);
Michal Kazior0945baf2013-09-18 14:43:18 +02006331 empty = (ar->htt.num_pending_tx == 0);
Michal Kazioredb82362013-07-05 16:15:14 +03006332 spin_unlock_bh(&ar->htt.tx_lock);
Michal Kazioraffd3212013-07-16 09:54:35 +02006333
Michal Kazior7962b0d2014-10-28 10:34:38 +01006334 skip = (ar->state == ATH10K_STATE_WEDGED) ||
6335 test_bit(ATH10K_FLAG_CRASH_FLUSH,
6336 &ar->dev_flags);
Michal Kazioraffd3212013-07-16 09:54:35 +02006337
6338 (empty || skip);
Kalle Valo5e3dd152013-06-12 20:52:10 +03006339 }), ATH10K_FLUSH_TIMEOUT_HZ);
Michal Kazioraffd3212013-07-16 09:54:35 +02006340
Nicholas Mc Guired4298a32015-06-15 14:46:43 +03006341 if (time_left == 0 || skip)
6342 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6343 skip, ar->state, time_left);
Michal Kazior548db542013-07-05 16:15:15 +03006344
Michal Kazioraffd3212013-07-16 09:54:35 +02006345skip:
Michal Kazior548db542013-07-05 16:15:15 +03006346 mutex_unlock(&ar->conf_mutex);
Kalle Valo5e3dd152013-06-12 20:52:10 +03006347}
6348
6349/* TODO: Implement this function properly
6350 * For now it is needed to reply to Probe Requests in IBSS mode.
 6351 * Probably we need this information from FW.
6352 */
6353static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6354{
6355 return 1;
6356}
6357
Eliad Pellercf2c92d2014-11-04 11:43:54 +02006358static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6359 enum ieee80211_reconfig_type reconfig_type)
Michal Kazioraffd3212013-07-16 09:54:35 +02006360{
6361 struct ath10k *ar = hw->priv;
6362
Eliad Pellercf2c92d2014-11-04 11:43:54 +02006363 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6364 return;
6365
Michal Kazioraffd3212013-07-16 09:54:35 +02006366 mutex_lock(&ar->conf_mutex);
6367
 6368	/* If the device failed to restart it will be in a different state,
 6369	 * e.g. ATH10K_STATE_WEDGED */
6370 if (ar->state == ATH10K_STATE_RESTARTED) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02006371 ath10k_info(ar, "device successfully recovered\n");
Michal Kazioraffd3212013-07-16 09:54:35 +02006372 ar->state = ATH10K_STATE_ON;
Michal Kazior7962b0d2014-10-28 10:34:38 +01006373 ieee80211_wake_queues(ar->hw);
Michal Kazioraffd3212013-07-16 09:54:35 +02006374 }
6375
6376 mutex_unlock(&ar->conf_mutex);
6377}
6378
Michal Kazior2e1dea42013-07-31 10:32:40 +02006379static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6380 struct survey_info *survey)
6381{
6382 struct ath10k *ar = hw->priv;
6383 struct ieee80211_supported_band *sband;
6384 struct survey_info *ar_survey = &ar->survey[idx];
6385 int ret = 0;
6386
6387 mutex_lock(&ar->conf_mutex);
6388
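	/* the survey index is global across bands: indices below the number
	 * of 2 GHz channels map to 2 GHz, the remainder to 5 GHz
	 */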
6389 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
6390 if (sband && idx >= sband->n_channels) {
6391 idx -= sband->n_channels;
6392 sband = NULL;
6393 }
6394
6395 if (!sband)
6396 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
6397
6398 if (!sband || idx >= sband->n_channels) {
6399 ret = -ENOENT;
6400 goto exit;
6401 }
6402
6403 spin_lock_bh(&ar->data_lock);
6404 memcpy(survey, ar_survey, sizeof(*survey));
6405 spin_unlock_bh(&ar->data_lock);
6406
6407 survey->channel = &sband->channels[idx];
6408
Felix Fietkaufa1d4df2014-10-23 17:04:28 +03006409 if (ar->rx_channel == survey->channel)
6410 survey->filled |= SURVEY_INFO_IN_USE;
6411
Michal Kazior2e1dea42013-07-31 10:32:40 +02006412exit:
6413 mutex_unlock(&ar->conf_mutex);
6414 return ret;
6415}
6416
Michal Kazior3ae54222015-03-31 10:49:20 +00006417static bool
6418ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6419 enum ieee80211_band band,
6420 const struct cfg80211_bitrate_mask *mask)
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006421{
Michal Kazior3ae54222015-03-31 10:49:20 +00006422 int num_rates = 0;
6423 int i;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006424
Michal Kazior3ae54222015-03-31 10:49:20 +00006425 num_rates += hweight32(mask->control[band].legacy);
6426
6427 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6428 num_rates += hweight8(mask->control[band].ht_mcs[i]);
6429
6430 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6431 num_rates += hweight16(mask->control[band].vht_mcs[i]);
6432
6433 return num_rates == 1;
6434}
6435
6436static bool
6437ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6438 enum ieee80211_band band,
6439 const struct cfg80211_bitrate_mask *mask,
6440 int *nss)
6441{
6442 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6443 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6444 u8 ht_nss_mask = 0;
6445 u8 vht_nss_mask = 0;
6446 int i;
6447
6448 if (mask->control[band].legacy)
6449 return false;
6450
6451 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6452 if (mask->control[band].ht_mcs[i] == 0)
6453 continue;
6454 else if (mask->control[band].ht_mcs[i] ==
6455 sband->ht_cap.mcs.rx_mask[i])
6456 ht_nss_mask |= BIT(i);
6457 else
6458 return false;
6459 }
6460
6461 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6462 if (mask->control[band].vht_mcs[i] == 0)
6463 continue;
6464 else if (mask->control[band].vht_mcs[i] ==
6465 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6466 vht_nss_mask |= BIT(i);
6467 else
6468 return false;
6469 }
6470
6471 if (ht_nss_mask != vht_nss_mask)
6472 return false;
6473
6474 if (ht_nss_mask == 0)
6475 return false;
6476
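	/* the NSS mask must be contiguous starting at 1SS (e.g. 0b0011 means
	 * 2 spatial streams), otherwise it cannot be expressed as a single nss
	 */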
6477 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6478 return false;
6479
6480 *nss = fls(ht_nss_mask);
6481
6482 return true;
6483}
6484
6485static int
6486ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6487 enum ieee80211_band band,
6488 const struct cfg80211_bitrate_mask *mask,
6489 u8 *rate, u8 *nss)
6490{
6491 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6492 int rate_idx;
6493 int i;
6494 u16 bitrate;
6495 u8 preamble;
6496 u8 hw_rate;
6497
6498 if (hweight32(mask->control[band].legacy) == 1) {
6499 rate_idx = ffs(mask->control[band].legacy) - 1;
6500
6501 hw_rate = sband->bitrates[rate_idx].hw_value;
6502 bitrate = sband->bitrates[rate_idx].bitrate;
6503
6504 if (ath10k_mac_bitrate_is_cck(bitrate))
6505 preamble = WMI_RATE_PREAMBLE_CCK;
6506 else
6507 preamble = WMI_RATE_PREAMBLE_OFDM;
6508
6509 *nss = 1;
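		/* rate code layout used below: bits [7:6] preamble,
		 * [5:4] NSS - 1, [3:0] hardware rate index
		 */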
6510 *rate = preamble << 6 |
6511 (*nss - 1) << 4 |
6512 hw_rate << 0;
6513
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006514 return 0;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006515 }
6516
Michal Kazior3ae54222015-03-31 10:49:20 +00006517 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6518 if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6519 *nss = i + 1;
6520 *rate = WMI_RATE_PREAMBLE_HT << 6 |
6521 (*nss - 1) << 4 |
6522 (ffs(mask->control[band].ht_mcs[i]) - 1);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006523
Michal Kazior3ae54222015-03-31 10:49:20 +00006524 return 0;
6525 }
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006526 }
6527
Michal Kazior3ae54222015-03-31 10:49:20 +00006528 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6529 if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6530 *nss = i + 1;
6531 *rate = WMI_RATE_PREAMBLE_VHT << 6 |
6532 (*nss - 1) << 4 |
6533 (ffs(mask->control[band].vht_mcs[i]) - 1);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006534
Michal Kazior3ae54222015-03-31 10:49:20 +00006535 return 0;
6536 }
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006537 }
6538
Michal Kazior3ae54222015-03-31 10:49:20 +00006539 return -EINVAL;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006540}
6541
Michal Kazior3ae54222015-03-31 10:49:20 +00006542static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
Rajkumar Manoharanbd4a41e2015-09-16 13:19:00 +05306543 u8 rate, u8 nss, u8 sgi, u8 ldpc)
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006544{
6545 struct ath10k *ar = arvif->ar;
6546 u32 vdev_param;
Michal Kazior3ae54222015-03-31 10:49:20 +00006547 int ret;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006548
Michal Kazior3ae54222015-03-31 10:49:20 +00006549 lockdep_assert_held(&ar->conf_mutex);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006550
Michal Kazior3ae54222015-03-31 10:49:20 +00006551 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6552 arvif->vdev_id, rate, nss, sgi);
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01006553
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006554 vdev_param = ar->wmi.vdev_param->fixed_rate;
Michal Kazior3ae54222015-03-31 10:49:20 +00006555 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006556 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02006557 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
Michal Kazior3ae54222015-03-31 10:49:20 +00006558 rate, ret);
6559 return ret;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006560 }
6561
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006562 vdev_param = ar->wmi.vdev_param->nss;
Michal Kazior3ae54222015-03-31 10:49:20 +00006563 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006564 if (ret) {
Michal Kazior3ae54222015-03-31 10:49:20 +00006565 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6566 return ret;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006567 }
6568
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01006569 vdev_param = ar->wmi.vdev_param->sgi;
Michal Kazior3ae54222015-03-31 10:49:20 +00006570 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01006571 if (ret) {
Michal Kazior3ae54222015-03-31 10:49:20 +00006572 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6573 return ret;
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01006574 }
6575
Rajkumar Manoharanbd4a41e2015-09-16 13:19:00 +05306576 vdev_param = ar->wmi.vdev_param->ldpc;
6577 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6578 if (ret) {
6579 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6580 return ret;
6581 }
6582
Michal Kazior3ae54222015-03-31 10:49:20 +00006583 return 0;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006584}
6585
Michal Kazior45c9abc2015-04-21 20:42:58 +03006586static bool
6587ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6588 enum ieee80211_band band,
6589 const struct cfg80211_bitrate_mask *mask)
6590{
6591 int i;
6592 u16 vht_mcs;
6593
6594 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6595 * to express all VHT MCS rate masks. Effectively only the following
6596 * ranges can be used: none, 0-7, 0-8 and 0-9.
6597 */
6598 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6599 vht_mcs = mask->control[band].vht_mcs[i];
6600
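		/* i.e. only 0x0 (none), 0xff (MCS 0-7), 0x1ff (MCS 0-8) and
		 * 0x3ff (MCS 0-9) are acceptable per-stream VHT MCS masks
		 */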
6601 switch (vht_mcs) {
6602 case 0:
6603 case BIT(8) - 1:
6604 case BIT(9) - 1:
6605 case BIT(10) - 1:
6606 break;
6607 default:
6608 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6609 return false;
6610 }
6611 }
6612
6613 return true;
6614}
6615
6616static void ath10k_mac_set_bitrate_mask_iter(void *data,
6617 struct ieee80211_sta *sta)
6618{
6619 struct ath10k_vif *arvif = data;
6620 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6621 struct ath10k *ar = arvif->ar;
6622
6623 if (arsta->arvif != arvif)
6624 return;
6625
6626 spin_lock_bh(&ar->data_lock);
6627 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
6628 spin_unlock_bh(&ar->data_lock);
6629
6630 ieee80211_queue_work(ar->hw, &arsta->update_wk);
6631}
6632
Michal Kazior3ae54222015-03-31 10:49:20 +00006633static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6634 struct ieee80211_vif *vif,
6635 const struct cfg80211_bitrate_mask *mask)
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006636{
6637 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Michal Kazior500ff9f2015-03-31 10:26:21 +00006638 struct cfg80211_chan_def def;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006639 struct ath10k *ar = arvif->ar;
Michal Kazior500ff9f2015-03-31 10:26:21 +00006640 enum ieee80211_band band;
Michal Kazior45c9abc2015-04-21 20:42:58 +03006641 const u8 *ht_mcs_mask;
6642 const u16 *vht_mcs_mask;
Michal Kazior3ae54222015-03-31 10:49:20 +00006643 u8 rate;
6644 u8 nss;
6645 u8 sgi;
Rajkumar Manoharanbd4a41e2015-09-16 13:19:00 +05306646 u8 ldpc;
Michal Kazior3ae54222015-03-31 10:49:20 +00006647 int single_nss;
6648 int ret;
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01006649
Michal Kazior500ff9f2015-03-31 10:26:21 +00006650 if (ath10k_mac_vif_chan(vif, &def))
6651 return -EPERM;
6652
Michal Kazior500ff9f2015-03-31 10:26:21 +00006653 band = def.chan->band;
Michal Kazior45c9abc2015-04-21 20:42:58 +03006654 ht_mcs_mask = mask->control[band].ht_mcs;
6655 vht_mcs_mask = mask->control[band].vht_mcs;
Rajkumar Manoharanbd4a41e2015-09-16 13:19:00 +05306656 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
Michal Kazior500ff9f2015-03-31 10:26:21 +00006657
Michal Kazior3ae54222015-03-31 10:49:20 +00006658 sgi = mask->control[band].gi;
6659 if (sgi == NL80211_TXRATE_FORCE_LGI)
Janusz Dziedzic9f81f722014-01-17 20:04:14 +01006660 return -EINVAL;
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006661
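	/* three cases: a single fixed rate, a single fixed nss, or an
	 * arbitrary mask which is applied by re-associating each station
	 * against the stored bitrate mask
	 */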
Michal Kazior3ae54222015-03-31 10:49:20 +00006662 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
6663 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
6664 &rate, &nss);
6665 if (ret) {
6666 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
6667 arvif->vdev_id, ret);
6668 return ret;
6669 }
6670 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
6671 &single_nss)) {
6672 rate = WMI_FIXED_RATE_NONE;
6673 nss = single_nss;
6674 } else {
6675 rate = WMI_FIXED_RATE_NONE;
Michal Kazior45c9abc2015-04-21 20:42:58 +03006676 nss = min(ar->num_rf_chains,
6677 max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6678 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6679
6680 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
6681 return -EINVAL;
6682
6683 mutex_lock(&ar->conf_mutex);
6684
6685 arvif->bitrate_mask = *mask;
6686 ieee80211_iterate_stations_atomic(ar->hw,
6687 ath10k_mac_set_bitrate_mask_iter,
6688 arvif);
6689
6690 mutex_unlock(&ar->conf_mutex);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006691 }
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006692
6693 mutex_lock(&ar->conf_mutex);
6694
Rajkumar Manoharanbd4a41e2015-09-16 13:19:00 +05306695 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006696 if (ret) {
Michal Kazior3ae54222015-03-31 10:49:20 +00006697 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
6698 arvif->vdev_id, ret);
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006699 goto exit;
6700 }
6701
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006702exit:
6703 mutex_unlock(&ar->conf_mutex);
Michal Kazior3ae54222015-03-31 10:49:20 +00006704
Janusz Dziedzic51ab1a02014-01-08 09:08:33 +01006705 return ret;
6706}
6707
Michal Kazior9797feb2014-02-14 14:49:48 +01006708static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6709 struct ieee80211_vif *vif,
6710 struct ieee80211_sta *sta,
6711 u32 changed)
6712{
6713 struct ath10k *ar = hw->priv;
6714 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6715 u32 bw, smps;
6716
6717 spin_lock_bh(&ar->data_lock);
6718
Michal Kazior7aa7a722014-08-25 12:09:38 +02006719 ath10k_dbg(ar, ATH10K_DBG_MAC,
Michal Kazior9797feb2014-02-14 14:49:48 +01006720 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6721 sta->addr, changed, sta->bandwidth, sta->rx_nss,
6722 sta->smps_mode);
6723
6724 if (changed & IEEE80211_RC_BW_CHANGED) {
6725 bw = WMI_PEER_CHWIDTH_20MHZ;
6726
6727 switch (sta->bandwidth) {
6728 case IEEE80211_STA_RX_BW_20:
6729 bw = WMI_PEER_CHWIDTH_20MHZ;
6730 break;
6731 case IEEE80211_STA_RX_BW_40:
6732 bw = WMI_PEER_CHWIDTH_40MHZ;
6733 break;
6734 case IEEE80211_STA_RX_BW_80:
6735 bw = WMI_PEER_CHWIDTH_80MHZ;
6736 break;
6737 case IEEE80211_STA_RX_BW_160:
Masanari Iidad939be32015-02-27 23:52:31 +09006738 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02006739 sta->bandwidth, sta->addr);
Michal Kazior9797feb2014-02-14 14:49:48 +01006740 bw = WMI_PEER_CHWIDTH_20MHZ;
6741 break;
6742 }
6743
6744 arsta->bw = bw;
6745 }
6746
6747 if (changed & IEEE80211_RC_NSS_CHANGED)
6748 arsta->nss = sta->rx_nss;
6749
6750 if (changed & IEEE80211_RC_SMPS_CHANGED) {
6751 smps = WMI_PEER_SMPS_PS_NONE;
6752
6753 switch (sta->smps_mode) {
6754 case IEEE80211_SMPS_AUTOMATIC:
6755 case IEEE80211_SMPS_OFF:
6756 smps = WMI_PEER_SMPS_PS_NONE;
6757 break;
6758 case IEEE80211_SMPS_STATIC:
6759 smps = WMI_PEER_SMPS_STATIC;
6760 break;
6761 case IEEE80211_SMPS_DYNAMIC:
6762 smps = WMI_PEER_SMPS_DYNAMIC;
6763 break;
6764 case IEEE80211_SMPS_NUM_MODES:
Michal Kazior7aa7a722014-08-25 12:09:38 +02006765 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
Kalle Valobe6546f2014-03-25 14:18:51 +02006766 sta->smps_mode, sta->addr);
Michal Kazior9797feb2014-02-14 14:49:48 +01006767 smps = WMI_PEER_SMPS_PS_NONE;
6768 break;
6769 }
6770
6771 arsta->smps = smps;
6772 }
6773
Michal Kazior9797feb2014-02-14 14:49:48 +01006774 arsta->changed |= changed;
6775
6776 spin_unlock_bh(&ar->data_lock);
6777
6778 ieee80211_queue_work(hw, &arsta->update_wk);
6779}
6780
Chun-Yeow Yeoh26ebbcc2014-02-25 09:29:54 +02006781static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
6782{
6783 /*
 6784	 * FIXME: Return 0 for the time being. Need to figure out whether the
 6785	 * FW has an API to fetch the 64-bit local TSF.
6786 */
6787
6788 return 0;
6789}
6790
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02006791static int ath10k_ampdu_action(struct ieee80211_hw *hw,
6792 struct ieee80211_vif *vif,
Sara Sharon50ea05e2015-12-30 16:06:04 +02006793 struct ieee80211_ampdu_params *params)
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02006794{
Michal Kazior7aa7a722014-08-25 12:09:38 +02006795 struct ath10k *ar = hw->priv;
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02006796 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
Sara Sharon50ea05e2015-12-30 16:06:04 +02006797 struct ieee80211_sta *sta = params->sta;
6798 enum ieee80211_ampdu_mlme_action action = params->action;
6799 u16 tid = params->tid;
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02006800
Michal Kazior7aa7a722014-08-25 12:09:38 +02006801 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02006802 arvif->vdev_id, sta->addr, tid, action);
6803
6804 switch (action) {
6805 case IEEE80211_AMPDU_RX_START:
6806 case IEEE80211_AMPDU_RX_STOP:
6807 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
6808 * creation/removal. Do we need to verify this?
6809 */
6810 return 0;
6811 case IEEE80211_AMPDU_TX_START:
6812 case IEEE80211_AMPDU_TX_STOP_CONT:
6813 case IEEE80211_AMPDU_TX_STOP_FLUSH:
6814 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
6815 case IEEE80211_AMPDU_TX_OPERATIONAL:
6816 /* Firmware offloads Tx aggregation entirely so deny mac80211
6817 * Tx aggregation requests.
6818 */
6819 return -EOPNOTSUPP;
6820 }
6821
6822 return -EINVAL;
6823}
6824
Michal Kazior500ff9f2015-03-31 10:26:21 +00006825static void
Michal Kaziord7bf4b42015-06-03 12:16:54 +02006826ath10k_mac_update_rx_channel(struct ath10k *ar,
6827 struct ieee80211_chanctx_conf *ctx,
6828 struct ieee80211_vif_chanctx_switch *vifs,
6829 int n_vifs)
Michal Kazior500ff9f2015-03-31 10:26:21 +00006830{
6831 struct cfg80211_chan_def *def = NULL;
6832
6833 /* Both locks are required because ar->rx_channel is modified. This
6834 * allows readers to hold either lock.
6835 */
6836 lockdep_assert_held(&ar->conf_mutex);
6837 lockdep_assert_held(&ar->data_lock);
6838
Michal Kaziord7bf4b42015-06-03 12:16:54 +02006839 WARN_ON(ctx && vifs);
6840 WARN_ON(vifs && n_vifs != 1);
6841
Michal Kazior500ff9f2015-03-31 10:26:21 +00006842 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
6843 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
6844 * ppdu on Rx may reduce performance on low-end systems. It should be
 6845	 * possible to make tables/hashmaps to speed the lookup up (be wary of
 6846	 * cpu data cache line sizes though) but to keep the initial
 6847	 * implementation simple and less intrusive, fall back to the slow lookup
 6848	 * only for multi-channel cases. Single-channel cases will continue to
 6849	 * use the old channel derivation and thus performance should not be
6850 * affected much.
6851 */
6852 rcu_read_lock();
Michal Kaziord7bf4b42015-06-03 12:16:54 +02006853 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
Michal Kazior500ff9f2015-03-31 10:26:21 +00006854 ieee80211_iter_chan_contexts_atomic(ar->hw,
Kalle Valo617b0f42015-10-05 17:56:35 +03006855 ath10k_mac_get_any_chandef_iter,
6856 &def);
Michal Kaziord7bf4b42015-06-03 12:16:54 +02006857
6858 if (vifs)
6859 def = &vifs[0].new_ctx->def;
6860
Michal Kazior500ff9f2015-03-31 10:26:21 +00006861 ar->rx_channel = def->chan;
Michal Kaziord7bf4b42015-06-03 12:16:54 +02006862 } else if (ctx && ath10k_mac_num_chanctxs(ar) == 0) {
6863 ar->rx_channel = ctx->def.chan;
Michal Kazior500ff9f2015-03-31 10:26:21 +00006864 } else {
6865 ar->rx_channel = NULL;
6866 }
6867 rcu_read_unlock();
6868}
6869
Michal Kazior7be6d1b2015-09-03 10:44:51 +02006870static void
6871ath10k_mac_update_vif_chan(struct ath10k *ar,
6872 struct ieee80211_vif_chanctx_switch *vifs,
6873 int n_vifs)
6874{
6875 struct ath10k_vif *arvif;
6876 int ret;
6877 int i;
6878
6879 lockdep_assert_held(&ar->conf_mutex);
6880
6881 /* First stop monitor interface. Some FW versions crash if there's a
6882 * lone monitor interface.
6883 */
6884 if (ar->monitor_started)
6885 ath10k_monitor_stop(ar);
6886
6887 for (i = 0; i < n_vifs; i++) {
6888 arvif = ath10k_vif_to_arvif(vifs[i].vif);
6889
6890 ath10k_dbg(ar, ATH10K_DBG_MAC,
6891 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
6892 arvif->vdev_id,
6893 vifs[i].old_ctx->def.chan->center_freq,
6894 vifs[i].new_ctx->def.chan->center_freq,
6895 vifs[i].old_ctx->def.width,
6896 vifs[i].new_ctx->def.width);
6897
6898 if (WARN_ON(!arvif->is_started))
6899 continue;
6900
6901 if (WARN_ON(!arvif->is_up))
6902 continue;
6903
6904 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
6905 if (ret) {
6906 ath10k_warn(ar, "failed to down vdev %d: %d\n",
6907 arvif->vdev_id, ret);
6908 continue;
6909 }
6910 }
6911
6912 /* All relevant vdevs are downed and associated channel resources
6913 * should be available for the channel switch now.
6914 */
6915
6916 spin_lock_bh(&ar->data_lock);
6917 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
6918 spin_unlock_bh(&ar->data_lock);
6919
6920 for (i = 0; i < n_vifs; i++) {
6921 arvif = ath10k_vif_to_arvif(vifs[i].vif);
6922
6923 if (WARN_ON(!arvif->is_started))
6924 continue;
6925
6926 if (WARN_ON(!arvif->is_up))
6927 continue;
6928
6929 ret = ath10k_mac_setup_bcn_tmpl(arvif);
6930 if (ret)
6931 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
6932 ret);
6933
6934 ret = ath10k_mac_setup_prb_tmpl(arvif);
6935 if (ret)
6936 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
6937 ret);
6938
6939 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
6940 if (ret) {
6941 ath10k_warn(ar, "failed to restart vdev %d: %d\n",
6942 arvif->vdev_id, ret);
6943 continue;
6944 }
6945
6946 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
6947 arvif->bssid);
6948 if (ret) {
6949 ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
6950 arvif->vdev_id, ret);
6951 continue;
6952 }
6953 }
6954
6955 ath10k_monitor_recalc(ar);
6956}
6957
Michal Kazior500ff9f2015-03-31 10:26:21 +00006958static int
6959ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
6960 struct ieee80211_chanctx_conf *ctx)
6961{
6962 struct ath10k *ar = hw->priv;
Michal Kazior500ff9f2015-03-31 10:26:21 +00006963
6964 ath10k_dbg(ar, ATH10K_DBG_MAC,
6965 "mac chanctx add freq %hu width %d ptr %p\n",
6966 ctx->def.chan->center_freq, ctx->def.width, ctx);
6967
6968 mutex_lock(&ar->conf_mutex);
6969
6970 spin_lock_bh(&ar->data_lock);
Michal Kaziord7bf4b42015-06-03 12:16:54 +02006971 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
Michal Kazior500ff9f2015-03-31 10:26:21 +00006972 spin_unlock_bh(&ar->data_lock);
6973
6974 ath10k_recalc_radar_detection(ar);
6975 ath10k_monitor_recalc(ar);
6976
6977 mutex_unlock(&ar->conf_mutex);
6978
6979 return 0;
6980}
6981
6982static void
6983ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
6984 struct ieee80211_chanctx_conf *ctx)
6985{
6986 struct ath10k *ar = hw->priv;
6987
6988 ath10k_dbg(ar, ATH10K_DBG_MAC,
6989 "mac chanctx remove freq %hu width %d ptr %p\n",
6990 ctx->def.chan->center_freq, ctx->def.width, ctx);
6991
6992 mutex_lock(&ar->conf_mutex);
6993
6994 spin_lock_bh(&ar->data_lock);
Michal Kaziord7bf4b42015-06-03 12:16:54 +02006995 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
Michal Kazior500ff9f2015-03-31 10:26:21 +00006996 spin_unlock_bh(&ar->data_lock);
6997
6998 ath10k_recalc_radar_detection(ar);
6999 ath10k_monitor_recalc(ar);
7000
7001 mutex_unlock(&ar->conf_mutex);
7002}
7003
Michal Kazior9713e3d2015-09-03 10:44:52 +02007004struct ath10k_mac_change_chanctx_arg {
7005 struct ieee80211_chanctx_conf *ctx;
7006 struct ieee80211_vif_chanctx_switch *vifs;
7007 int n_vifs;
7008 int next_vif;
7009};
7010
7011static void
7012ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7013 struct ieee80211_vif *vif)
7014{
7015 struct ath10k_mac_change_chanctx_arg *arg = data;
7016
7017 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7018 return;
7019
7020 arg->n_vifs++;
7021}
7022
7023static void
7024ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7025 struct ieee80211_vif *vif)
7026{
7027 struct ath10k_mac_change_chanctx_arg *arg = data;
7028 struct ieee80211_chanctx_conf *ctx;
7029
7030 ctx = rcu_access_pointer(vif->chanctx_conf);
7031 if (ctx != arg->ctx)
7032 return;
7033
7034 if (WARN_ON(arg->next_vif == arg->n_vifs))
7035 return;
7036
7037 arg->vifs[arg->next_vif].vif = vif;
7038 arg->vifs[arg->next_vif].old_ctx = ctx;
7039 arg->vifs[arg->next_vif].new_ctx = ctx;
7040 arg->next_vif++;
7041}
7042
Michal Kazior500ff9f2015-03-31 10:26:21 +00007043static void
7044ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7045 struct ieee80211_chanctx_conf *ctx,
7046 u32 changed)
7047{
7048 struct ath10k *ar = hw->priv;
Michal Kazior9713e3d2015-09-03 10:44:52 +02007049 struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
Michal Kazior500ff9f2015-03-31 10:26:21 +00007050
7051 mutex_lock(&ar->conf_mutex);
7052
7053 ath10k_dbg(ar, ATH10K_DBG_MAC,
Michal Kazior089ab7a2015-06-03 12:16:55 +02007054 "mac chanctx change freq %hu width %d ptr %p changed %x\n",
7055 ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
Michal Kazior500ff9f2015-03-31 10:26:21 +00007056
7057 /* This shouldn't really happen because channel switching should use
7058 * switch_vif_chanctx().
7059 */
7060 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7061 goto unlock;
7062
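	/* two atomic iterations: first count the vifs bound to this chanctx,
	 * then allocate the switch array and fill it in
	 */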
Michal Kazior9713e3d2015-09-03 10:44:52 +02007063 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7064 ieee80211_iterate_active_interfaces_atomic(
7065 hw,
7066 IEEE80211_IFACE_ITER_NORMAL,
7067 ath10k_mac_change_chanctx_cnt_iter,
7068 &arg);
7069 if (arg.n_vifs == 0)
7070 goto radar;
7071
7072 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7073 GFP_KERNEL);
7074 if (!arg.vifs)
7075 goto radar;
7076
7077 ieee80211_iterate_active_interfaces_atomic(
7078 hw,
7079 IEEE80211_IFACE_ITER_NORMAL,
7080 ath10k_mac_change_chanctx_fill_iter,
7081 &arg);
7082 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7083 kfree(arg.vifs);
7084 }
7085
7086radar:
Michal Kazior500ff9f2015-03-31 10:26:21 +00007087 ath10k_recalc_radar_detection(ar);
7088
7089 /* FIXME: How to configure Rx chains properly? */
7090
7091 /* No other actions are actually necessary. Firmware maintains channel
7092 * definitions per vdev internally and there's no host-side channel
7093 * context abstraction to configure, e.g. channel width.
7094 */
7095
7096unlock:
7097 mutex_unlock(&ar->conf_mutex);
7098}
7099
7100static int
7101ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7102 struct ieee80211_vif *vif,
7103 struct ieee80211_chanctx_conf *ctx)
7104{
7105 struct ath10k *ar = hw->priv;
Michal Kazior500ff9f2015-03-31 10:26:21 +00007106 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7107 int ret;
7108
7109 mutex_lock(&ar->conf_mutex);
7110
7111 ath10k_dbg(ar, ATH10K_DBG_MAC,
7112 "mac chanctx assign ptr %p vdev_id %i\n",
7113 ctx, arvif->vdev_id);
7114
7115 if (WARN_ON(arvif->is_started)) {
7116 mutex_unlock(&ar->conf_mutex);
7117 return -EBUSY;
7118 }
7119
Michal Kazior089ab7a2015-06-03 12:16:55 +02007120 ret = ath10k_vdev_start(arvif, &ctx->def);
Michal Kazior500ff9f2015-03-31 10:26:21 +00007121 if (ret) {
7122 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7123 arvif->vdev_id, vif->addr,
Michal Kazior089ab7a2015-06-03 12:16:55 +02007124 ctx->def.chan->center_freq, ret);
Michal Kazior500ff9f2015-03-31 10:26:21 +00007125 goto err;
7126 }
7127
7128 arvif->is_started = true;
7129
Michal Kaziorf23e587e2015-07-09 13:08:37 +02007130 ret = ath10k_mac_vif_setup_ps(arvif);
7131 if (ret) {
7132 ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7133 arvif->vdev_id, ret);
7134 goto err_stop;
7135 }
7136
Michal Kazior500ff9f2015-03-31 10:26:21 +00007137 if (vif->type == NL80211_IFTYPE_MONITOR) {
7138 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7139 if (ret) {
7140 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7141 arvif->vdev_id, ret);
7142 goto err_stop;
7143 }
7144
7145 arvif->is_up = true;
7146 }
7147
7148 mutex_unlock(&ar->conf_mutex);
7149 return 0;
7150
7151err_stop:
7152 ath10k_vdev_stop(arvif);
7153 arvif->is_started = false;
Michal Kaziorf23e587e2015-07-09 13:08:37 +02007154 ath10k_mac_vif_setup_ps(arvif);
Michal Kazior500ff9f2015-03-31 10:26:21 +00007155
7156err:
7157 mutex_unlock(&ar->conf_mutex);
7158 return ret;
7159}
7160
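/* Reverse of assign_vif_chanctx: monitor vdevs are brought down first and
 * the vdev is then stopped. Failures are only logged since the callback
 * returns void and there is nothing meaningful to roll back here.
 */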
7161static void
7162ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7163 struct ieee80211_vif *vif,
7164 struct ieee80211_chanctx_conf *ctx)
7165{
7166 struct ath10k *ar = hw->priv;
7167 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7168 int ret;
7169
7170 mutex_lock(&ar->conf_mutex);
7171
7172 ath10k_dbg(ar, ATH10K_DBG_MAC,
7173 "mac chanctx unassign ptr %p vdev_id %i\n",
7174 ctx, arvif->vdev_id);
7175
7176 WARN_ON(!arvif->is_started);
7177
7178 if (vif->type == NL80211_IFTYPE_MONITOR) {
7179 WARN_ON(!arvif->is_up);
7180
7181 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7182 if (ret)
7183 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7184 arvif->vdev_id, ret);
7185
7186 arvif->is_up = false;
7187 }
7188
7189 ret = ath10k_vdev_stop(arvif);
7190 if (ret)
7191 ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7192 arvif->vdev_id, ret);
7193
7194 arvif->is_started = false;
7195
7196 mutex_unlock(&ar->conf_mutex);
7197}
7198
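/* mac80211 hands over a pre-built array of vif/context switches; the whole
 * batch is processed by ath10k_mac_update_vif_chan() under conf_mutex,
 * regardless of the requested switch mode.
 */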
7199static int
7200ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7201 struct ieee80211_vif_chanctx_switch *vifs,
7202 int n_vifs,
7203 enum ieee80211_chanctx_switch_mode mode)
7204{
7205 struct ath10k *ar = hw->priv;
Michal Kazior500ff9f2015-03-31 10:26:21 +00007206
7207 mutex_lock(&ar->conf_mutex);
7208
7209 ath10k_dbg(ar, ATH10K_DBG_MAC,
7210 "mac chanctx switch n_vifs %d mode %d\n",
7211 n_vifs, mode);
Michal Kazior7be6d1b2015-09-03 10:44:51 +02007212 ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
Michal Kazior500ff9f2015-03-31 10:26:21 +00007213
7214 mutex_unlock(&ar->conf_mutex);
7215 return 0;
7216}
7217
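/* mac80211 callback table wiring up the driver entry points defined
 * throughout this file. The suspend/resume (WoW) and per-station debugfs
 * hooks are only compiled in when the corresponding kernel options are
 * enabled.
 */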
Kalle Valo5e3dd152013-06-12 20:52:10 +03007218static const struct ieee80211_ops ath10k_ops = {
Michal Kaziorf2f6eca2016-03-01 11:32:46 +01007219 .tx = ath10k_mac_op_tx,
Michal Kazior29946872016-03-06 16:14:34 +02007220 .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
Kalle Valo5e3dd152013-06-12 20:52:10 +03007221 .start = ath10k_start,
7222 .stop = ath10k_stop,
7223 .config = ath10k_config,
7224 .add_interface = ath10k_add_interface,
7225 .remove_interface = ath10k_remove_interface,
7226 .configure_filter = ath10k_configure_filter,
7227 .bss_info_changed = ath10k_bss_info_changed,
7228 .hw_scan = ath10k_hw_scan,
7229 .cancel_hw_scan = ath10k_cancel_hw_scan,
7230 .set_key = ath10k_set_key,
SenthilKumar Jegadeesan627613f2015-01-29 13:50:38 +02007231 .set_default_unicast_key = ath10k_set_default_unicast_key,
Kalle Valo5e3dd152013-06-12 20:52:10 +03007232 .sta_state = ath10k_sta_state,
7233 .conf_tx = ath10k_conf_tx,
7234 .remain_on_channel = ath10k_remain_on_channel,
7235 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
7236 .set_rts_threshold = ath10k_set_rts_threshold,
Michal Kazior92092fe2015-08-03 11:16:43 +02007237 .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
Kalle Valo5e3dd152013-06-12 20:52:10 +03007238 .flush = ath10k_flush,
7239 .tx_last_beacon = ath10k_tx_last_beacon,
Ben Greear46acf7b2014-05-16 17:15:38 +03007240 .set_antenna = ath10k_set_antenna,
7241 .get_antenna = ath10k_get_antenna,
Eliad Pellercf2c92d2014-11-04 11:43:54 +02007242 .reconfig_complete = ath10k_reconfig_complete,
Michal Kazior2e1dea42013-07-31 10:32:40 +02007243 .get_survey = ath10k_get_survey,
Michal Kazior3ae54222015-03-31 10:49:20 +00007244 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
Michal Kazior9797feb2014-02-14 14:49:48 +01007245 .sta_rc_update = ath10k_sta_rc_update,
Chun-Yeow Yeoh26ebbcc2014-02-25 09:29:54 +02007246 .get_tsf = ath10k_get_tsf,
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02007247 .ampdu_action = ath10k_ampdu_action,
Ben Greear6cddcc72014-09-29 14:41:46 +03007248 .get_et_sset_count = ath10k_debug_get_et_sset_count,
7249 .get_et_stats = ath10k_debug_get_et_stats,
7250 .get_et_strings = ath10k_debug_get_et_strings,
Michal Kazior500ff9f2015-03-31 10:26:21 +00007251 .add_chanctx = ath10k_mac_op_add_chanctx,
7252 .remove_chanctx = ath10k_mac_op_remove_chanctx,
7253 .change_chanctx = ath10k_mac_op_change_chanctx,
7254 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
7255 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
7256 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
Kalle Valo43d2a302014-09-10 18:23:30 +03007257
7258 CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7259
Michal Kazior8cd13ca2013-07-16 09:38:54 +02007260#ifdef CONFIG_PM
Janusz Dziedzic5fd3ac32015-03-23 17:32:53 +02007261 .suspend = ath10k_wow_op_suspend,
7262 .resume = ath10k_wow_op_resume,
Michal Kazior8cd13ca2013-07-16 09:38:54 +02007263#endif
Rajkumar Manoharanf5045982015-01-12 14:07:27 +02007264#ifdef CONFIG_MAC80211_DEBUGFS
7265 .sta_add_debugfs = ath10k_sta_add_debugfs,
7266#endif
Kalle Valo5e3dd152013-06-12 20:52:10 +03007267};
7268
Kalle Valo5e3dd152013-06-12 20:52:10 +03007269#define CHAN2G(_channel, _freq, _flags) { \
7270 .band = IEEE80211_BAND_2GHZ, \
7271 .hw_value = (_channel), \
7272 .center_freq = (_freq), \
7273 .flags = (_flags), \
7274 .max_antenna_gain = 0, \
7275 .max_power = 30, \
7276}
7277
7278#define CHAN5G(_channel, _freq, _flags) { \
7279 .band = IEEE80211_BAND_5GHZ, \
7280 .hw_value = (_channel), \
7281 .center_freq = (_freq), \
7282 .flags = (_flags), \
7283 .max_antenna_gain = 0, \
7284 .max_power = 30, \
7285}
7286
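/* Static channel tables built with the macros above. Every entry defaults to
 * a 30 dBm power limit, zero antenna gain and no channel flags; regulatory
 * adjustments are hooked up separately through ath_regd_init() and
 * ath10k_reg_notifier() in ath10k_mac_register().
 */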
7287static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7288 CHAN2G(1, 2412, 0),
7289 CHAN2G(2, 2417, 0),
7290 CHAN2G(3, 2422, 0),
7291 CHAN2G(4, 2427, 0),
7292 CHAN2G(5, 2432, 0),
7293 CHAN2G(6, 2437, 0),
7294 CHAN2G(7, 2442, 0),
7295 CHAN2G(8, 2447, 0),
7296 CHAN2G(9, 2452, 0),
7297 CHAN2G(10, 2457, 0),
7298 CHAN2G(11, 2462, 0),
7299 CHAN2G(12, 2467, 0),
7300 CHAN2G(13, 2472, 0),
7301 CHAN2G(14, 2484, 0),
7302};
7303
7304static const struct ieee80211_channel ath10k_5ghz_channels[] = {
Michal Kazior429ff562013-06-26 08:54:54 +02007305 CHAN5G(36, 5180, 0),
7306 CHAN5G(40, 5200, 0),
7307 CHAN5G(44, 5220, 0),
7308 CHAN5G(48, 5240, 0),
7309 CHAN5G(52, 5260, 0),
7310 CHAN5G(56, 5280, 0),
7311 CHAN5G(60, 5300, 0),
7312 CHAN5G(64, 5320, 0),
7313 CHAN5G(100, 5500, 0),
7314 CHAN5G(104, 5520, 0),
7315 CHAN5G(108, 5540, 0),
7316 CHAN5G(112, 5560, 0),
7317 CHAN5G(116, 5580, 0),
7318 CHAN5G(120, 5600, 0),
7319 CHAN5G(124, 5620, 0),
7320 CHAN5G(128, 5640, 0),
7321 CHAN5G(132, 5660, 0),
7322 CHAN5G(136, 5680, 0),
7323 CHAN5G(140, 5700, 0),
Peter Oh4a7898f2015-03-18 11:39:18 -07007324 CHAN5G(144, 5720, 0),
Michal Kazior429ff562013-06-26 08:54:54 +02007325 CHAN5G(149, 5745, 0),
7326 CHAN5G(153, 5765, 0),
7327 CHAN5G(157, 5785, 0),
7328 CHAN5G(161, 5805, 0),
7329 CHAN5G(165, 5825, 0),
Kalle Valo5e3dd152013-06-12 20:52:10 +03007330};
7331
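/* Allocates the ieee80211_hw together with struct ath10k; priv_size gives
 * the caller (typically the bus layer) room for its own private data
 * appended to the ath10k structure.
 */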
Michal Kaziore7b54192014-08-07 11:03:27 +02007332struct ath10k *ath10k_mac_create(size_t priv_size)
Kalle Valo5e3dd152013-06-12 20:52:10 +03007333{
7334 struct ieee80211_hw *hw;
7335 struct ath10k *ar;
7336
Michal Kaziore7b54192014-08-07 11:03:27 +02007337 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
Kalle Valo5e3dd152013-06-12 20:52:10 +03007338 if (!hw)
7339 return NULL;
7340
7341 ar = hw->priv;
7342 ar->hw = hw;
7343
7344 return ar;
7345}
7346
7347void ath10k_mac_destroy(struct ath10k *ar)
7348{
7349 ieee80211_free_hw(ar->hw);
7350}
7351
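/* Interface combination tables advertised to mac80211. mac80211 rejects
 * interface additions that would violate these limits; which table is used
 * depends on the firmware WMI op version and is selected in
 * ath10k_mac_register().
 */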
7352static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7353 {
Mohammed Shafi Shajakhan78f7aeb2015-11-21 15:24:41 +05307354 .max = 8,
7355 .types = BIT(NL80211_IFTYPE_STATION)
7356 | BIT(NL80211_IFTYPE_P2P_CLIENT)
Michal Kaziord531cb82013-07-31 10:55:13 +02007357 },
7358 {
Mohammed Shafi Shajakhan78f7aeb2015-11-21 15:24:41 +05307359 .max = 3,
7360 .types = BIT(NL80211_IFTYPE_P2P_GO)
Michal Kaziord531cb82013-07-31 10:55:13 +02007361 },
7362 {
Mohammed Shafi Shajakhan78f7aeb2015-11-21 15:24:41 +05307363 .max = 1,
7364 .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
Michal Kazior75d2bd42014-12-12 12:41:39 +01007365 },
7366 {
Mohammed Shafi Shajakhan78f7aeb2015-11-21 15:24:41 +05307367 .max = 7,
7368 .types = BIT(NL80211_IFTYPE_AP)
Bob Copelandb6c7baf2015-09-09 12:47:36 -04007369#ifdef CONFIG_MAC80211_MESH
Mohammed Shafi Shajakhan78f7aeb2015-11-21 15:24:41 +05307370 | BIT(NL80211_IFTYPE_MESH_POINT)
Bob Copelandb6c7baf2015-09-09 12:47:36 -04007371#endif
Michal Kaziord531cb82013-07-31 10:55:13 +02007372 },
Kalle Valo5e3dd152013-06-12 20:52:10 +03007373};
7374
Bartosz Markowskif2595092013-12-10 16:20:39 +01007375static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
Marek Puzyniake8a50f82013-11-20 09:59:47 +02007376 {
Mohammed Shafi Shajakhan78f7aeb2015-11-21 15:24:41 +05307377 .max = 8,
7378 .types = BIT(NL80211_IFTYPE_AP)
Bob Copelandb6c7baf2015-09-09 12:47:36 -04007379#ifdef CONFIG_MAC80211_MESH
Mohammed Shafi Shajakhan78f7aeb2015-11-21 15:24:41 +05307380 | BIT(NL80211_IFTYPE_MESH_POINT)
Bob Copelandb6c7baf2015-09-09 12:47:36 -04007381#endif
Marek Puzyniake8a50f82013-11-20 09:59:47 +02007382 },
Mohammed Shafi Shajakhan78f7aeb2015-11-21 15:24:41 +05307383 {
7384 .max = 1,
7385 .types = BIT(NL80211_IFTYPE_STATION)
7386 },
Marek Puzyniake8a50f82013-11-20 09:59:47 +02007387};
Marek Puzyniake8a50f82013-11-20 09:59:47 +02007388
7389static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7390 {
7391 .limits = ath10k_if_limits,
7392 .n_limits = ARRAY_SIZE(ath10k_if_limits),
7393 .max_interfaces = 8,
7394 .num_different_channels = 1,
7395 .beacon_int_infra_match = true,
7396 },
Bartosz Markowskif2595092013-12-10 16:20:39 +01007397};
7398
7399static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
Marek Puzyniake8a50f82013-11-20 09:59:47 +02007400 {
Bartosz Markowskif2595092013-12-10 16:20:39 +01007401 .limits = ath10k_10x_if_limits,
7402 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
Marek Puzyniake8a50f82013-11-20 09:59:47 +02007403 .max_interfaces = 8,
7404 .num_different_channels = 1,
7405 .beacon_int_infra_match = true,
Bartosz Markowskif2595092013-12-10 16:20:39 +01007406#ifdef CONFIG_ATH10K_DFS_CERTIFIED
Marek Puzyniake8a50f82013-11-20 09:59:47 +02007407 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7408 BIT(NL80211_CHAN_WIDTH_20) |
7409 BIT(NL80211_CHAN_WIDTH_40) |
7410 BIT(NL80211_CHAN_WIDTH_80),
Marek Puzyniake8a50f82013-11-20 09:59:47 +02007411#endif
Bartosz Markowskif2595092013-12-10 16:20:39 +01007412 },
Kalle Valo5e3dd152013-06-12 20:52:10 +03007413};
7414
Michal Kaziorcf327842015-03-31 10:26:25 +00007415static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7416 {
7417 .max = 2,
Michal Kaziored25b112015-07-09 13:08:39 +02007418 .types = BIT(NL80211_IFTYPE_STATION),
7419 },
7420 {
7421 .max = 2,
7422 .types = BIT(NL80211_IFTYPE_AP) |
Bob Copelandb6c7baf2015-09-09 12:47:36 -04007423#ifdef CONFIG_MAC80211_MESH
7424 BIT(NL80211_IFTYPE_MESH_POINT) |
7425#endif
Michal Kaziorcf327842015-03-31 10:26:25 +00007426 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7427 BIT(NL80211_IFTYPE_P2P_GO),
7428 },
7429 {
7430 .max = 1,
7431 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7432 },
7433};
7434
Michal Kaziored25b112015-07-09 13:08:39 +02007435static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7436 {
7437 .max = 2,
7438 .types = BIT(NL80211_IFTYPE_STATION),
7439 },
7440 {
7441 .max = 2,
7442 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7443 },
7444 {
7445 .max = 1,
7446 .types = BIT(NL80211_IFTYPE_AP) |
Bob Copelandb6c7baf2015-09-09 12:47:36 -04007447#ifdef CONFIG_MAC80211_MESH
7448 BIT(NL80211_IFTYPE_MESH_POINT) |
7449#endif
Michal Kaziored25b112015-07-09 13:08:39 +02007450 BIT(NL80211_IFTYPE_P2P_GO),
7451 },
7452 {
7453 .max = 1,
7454 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7455 },
7456};
7457
Michal Kaziorcf327842015-03-31 10:26:25 +00007458static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7459 {
7460 .max = 1,
7461 .types = BIT(NL80211_IFTYPE_STATION),
7462 },
7463 {
7464 .max = 1,
7465 .types = BIT(NL80211_IFTYPE_ADHOC),
7466 },
7467};
7468
7469/* FIXME: This is not thoroughly tested. These combinations may over- or
7470 * underestimate hw/fw capabilities.
7471 */
7472static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7473 {
7474 .limits = ath10k_tlv_if_limit,
7475 .num_different_channels = 1,
Michal Kaziored25b112015-07-09 13:08:39 +02007476 .max_interfaces = 4,
Michal Kaziorcf327842015-03-31 10:26:25 +00007477 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7478 },
7479 {
7480 .limits = ath10k_tlv_if_limit_ibss,
7481 .num_different_channels = 1,
7482 .max_interfaces = 2,
7483 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7484 },
7485};
7486
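/* Variant of the TLV combinations used when firmware reports
 * WMI_SERVICE_ADAPTIVE_OCS: the second entry permits operating on two
 * different channels concurrently.
 */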
7487static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7488 {
7489 .limits = ath10k_tlv_if_limit,
Michal Kaziored25b112015-07-09 13:08:39 +02007490 .num_different_channels = 1,
7491 .max_interfaces = 4,
Michal Kaziorcf327842015-03-31 10:26:25 +00007492 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7493 },
7494 {
Michal Kaziored25b112015-07-09 13:08:39 +02007495 .limits = ath10k_tlv_qcs_if_limit,
7496 .num_different_channels = 2,
7497 .max_interfaces = 4,
7498 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7499 },
7500 {
Michal Kaziorcf327842015-03-31 10:26:25 +00007501 .limits = ath10k_tlv_if_limit_ibss,
7502 .num_different_channels = 1,
7503 .max_interfaces = 2,
7504 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7505 },
7506};
7507
Raja Manicf36fef2015-06-22 20:22:25 +05307508static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7509 {
7510 .max = 1,
7511 .types = BIT(NL80211_IFTYPE_STATION),
7512 },
7513 {
7514 .max = 16,
7515 .types = BIT(NL80211_IFTYPE_AP)
Bob Copelandb6c7baf2015-09-09 12:47:36 -04007516#ifdef CONFIG_MAC80211_MESH
7517 | BIT(NL80211_IFTYPE_MESH_POINT)
7518#endif
Raja Manicf36fef2015-06-22 20:22:25 +05307519 },
7520};
7521
7522static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7523 {
7524 .limits = ath10k_10_4_if_limits,
7525 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7526 .max_interfaces = 16,
7527 .num_different_channels = 1,
7528 .beacon_int_infra_match = true,
7529#ifdef CONFIG_ATH10K_DFS_CERTIFIED
7530 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7531 BIT(NL80211_CHAN_WIDTH_20) |
7532 BIT(NL80211_CHAN_WIDTH_40) |
7533 BIT(NL80211_CHAN_WIDTH_80),
7534#endif
7535 },
7536};
7537
Kalle Valo5e3dd152013-06-12 20:52:10 +03007538static void ath10k_get_arvif_iter(void *data, u8 *mac,
7539 struct ieee80211_vif *vif)
7540{
7541 struct ath10k_vif_iter *arvif_iter = data;
7542 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7543
7544 if (arvif->vdev_id == arvif_iter->vdev_id)
7545 arvif_iter->arvif = arvif;
7546}
7547
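/* Looks up the ath10k_vif that owns a given firmware vdev_id by iterating
 * all active interfaces; returns NULL (with a warning) if no match is found.
 */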
7548struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7549{
7550 struct ath10k_vif_iter arvif_iter;
7551 u32 flags;
7552
7553 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7554 arvif_iter.vdev_id = vdev_id;
7555
7556 flags = IEEE80211_IFACE_ITER_RESUME_ALL;
7557 ieee80211_iterate_active_interfaces_atomic(ar->hw,
7558 flags,
7559 ath10k_get_arvif_iter,
7560 &arvif_iter);
7561 if (!arvif_iter.arvif) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02007562 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03007563 return NULL;
7564 }
7565
7566 return arvif_iter.arvif;
7567}
7568
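/* Final registration with mac80211: builds the per-band channel/rate tables,
 * advertises hardware flags and interface combinations matching the firmware
 * WMI op version, initializes WoW and the DFS pattern detector, and finally
 * calls ieee80211_register_hw().
 */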
7569int ath10k_mac_register(struct ath10k *ar)
7570{
Johannes Berg3cb10942015-01-22 21:38:45 +01007571 static const u32 cipher_suites[] = {
7572 WLAN_CIPHER_SUITE_WEP40,
7573 WLAN_CIPHER_SUITE_WEP104,
7574 WLAN_CIPHER_SUITE_TKIP,
7575 WLAN_CIPHER_SUITE_CCMP,
7576 WLAN_CIPHER_SUITE_AES_CMAC,
7577 };
Kalle Valo5e3dd152013-06-12 20:52:10 +03007578 struct ieee80211_supported_band *band;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007579 void *channels;
7580 int ret;
7581
7582 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
7583
7584 SET_IEEE80211_DEV(ar->hw, ar->dev);
7585
Michal Kaziorc94aa7e2015-03-24 12:38:11 +00007586 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
7587 ARRAY_SIZE(ath10k_5ghz_channels)) !=
7588 ATH10K_NUM_CHANS);
7589
Kalle Valo5e3dd152013-06-12 20:52:10 +03007590 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
7591 channels = kmemdup(ath10k_2ghz_channels,
7592 sizeof(ath10k_2ghz_channels),
7593 GFP_KERNEL);
Michal Kaziord6015b22013-07-22 14:13:30 +02007594 if (!channels) {
7595 ret = -ENOMEM;
7596 goto err_free;
7597 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03007598
7599 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
7600 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7601 band->channels = channels;
7602 band->n_bitrates = ath10k_g_rates_size;
7603 band->bitrates = ath10k_g_rates;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007604
7605 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
7606 }
7607
7608 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
7609 channels = kmemdup(ath10k_5ghz_channels,
7610 sizeof(ath10k_5ghz_channels),
7611 GFP_KERNEL);
7612 if (!channels) {
Michal Kaziord6015b22013-07-22 14:13:30 +02007613 ret = -ENOMEM;
7614 goto err_free;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007615 }
7616
7617 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
7618 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7619 band->channels = channels;
7620 band->n_bitrates = ath10k_a_rates_size;
7621 band->bitrates = ath10k_a_rates;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007622 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
7623 }
7624
Rajkumar Manoharan5036fe02015-10-27 17:51:14 +05307625 ath10k_mac_setup_ht_vht_cap(ar);
7626
Kalle Valo5e3dd152013-06-12 20:52:10 +03007627 ar->hw->wiphy->interface_modes =
7628 BIT(NL80211_IFTYPE_STATION) |
Bob Copelandb6c7baf2015-09-09 12:47:36 -04007629 BIT(NL80211_IFTYPE_AP) |
7630 BIT(NL80211_IFTYPE_MESH_POINT);
Bartosz Markowskid3541812013-12-10 16:20:40 +01007631
Rajkumar Manoharan166de3f2015-10-27 17:51:11 +05307632 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
7633 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
Ben Greear46acf7b2014-05-16 17:15:38 +03007634
Bartosz Markowskid3541812013-12-10 16:20:40 +01007635 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
7636 ar->hw->wiphy->interface_modes |=
Michal Kazior75d2bd42014-12-12 12:41:39 +01007637 BIT(NL80211_IFTYPE_P2P_DEVICE) |
Bartosz Markowskid3541812013-12-10 16:20:40 +01007638 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7639 BIT(NL80211_IFTYPE_P2P_GO);
Kalle Valo5e3dd152013-06-12 20:52:10 +03007640
Johannes Berg30686bf2015-06-02 21:39:54 +02007641 ieee80211_hw_set(ar->hw, SIGNAL_DBM);
7642 ieee80211_hw_set(ar->hw, SUPPORTS_PS);
7643 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
7644 ieee80211_hw_set(ar->hw, MFP_CAPABLE);
7645 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
7646 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
7647 ieee80211_hw_set(ar->hw, AP_LINK_PS);
7648 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
Johannes Berg30686bf2015-06-02 21:39:54 +02007649 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
7650 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
7651 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
7652 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
7653 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
7654 ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
Kalle Valo5e3dd152013-06-12 20:52:10 +03007655
David Liuccec9032015-07-24 20:25:32 +03007656 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7657 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
7658
Eliad Peller0d8614b2014-09-10 14:07:36 +03007659 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
Janusz Dziedzic0cd9bc12015-04-10 13:23:23 +00007660 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
Eliad Peller0d8614b2014-09-10 14:07:36 +03007661
Kalle Valo5e3dd152013-06-12 20:52:10 +03007662 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
Eliad Peller0d8614b2014-09-10 14:07:36 +03007663 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007664
7665 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
Johannes Berg30686bf2015-06-02 21:39:54 +02007666 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
7667 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
Kalle Valo5e3dd152013-06-12 20:52:10 +03007668 }
7669
7670 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
7671 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
7672
7673 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
Michal Kazior9797feb2014-02-14 14:49:48 +01007674 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
Michal Kazior29946872016-03-06 16:14:34 +02007675 ar->hw->txq_data_size = sizeof(struct ath10k_txq);
Kalle Valo5e3dd152013-06-12 20:52:10 +03007676
Kalle Valo5e3dd152013-06-12 20:52:10 +03007677 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
7678
Michal Kaziorfbb8f1b2015-01-13 16:30:12 +02007679 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
7680 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
7681
7682	 /* Firmware delivers WPS/P2P Probe Request frames to the driver so
7683	 * that userspace (e.g. wpa_supplicant/hostapd) can generate
7684	 * correct Probe Responses. Advertising the offload is more of a hack.
7685 */
7686 ar->hw->wiphy->probe_resp_offload |=
7687 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
7688 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
7689 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
7690 }
7691
Marek Puzyniak75d85fd2015-03-30 09:51:53 +03007692 if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
7693 ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
7694
Kalle Valo5e3dd152013-06-12 20:52:10 +03007695 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
Michal Kaziorc2df44b2014-01-23 11:38:26 +01007696 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007697 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
7698
7699 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
Rajkumar Manoharan78157a12014-11-17 16:44:15 +02007700 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
7701
Janusz.Dziedzic@tieto.com37a0b392015-03-12 13:11:41 +01007702 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
7703
Janusz Dziedzic5fd3ac32015-03-23 17:32:53 +02007704 ret = ath10k_wow_init(ar);
7705 if (ret) {
7706 ath10k_warn(ar, "failed to init wow: %d\n", ret);
7707 goto err_free;
7708 }
7709
Janusz Dziedzicc7025342015-06-15 14:46:41 +03007710 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
7711
Kalle Valo5e3dd152013-06-12 20:52:10 +03007712 /*
7713	 * On LL hardware the queues are managed entirely by the firmware,
7714	 * so all we advertise to mac80211 is the number of queues we can use.
7715 */
Michal Kazior96d828d2015-03-31 10:26:23 +00007716 ar->hw->queues = IEEE80211_MAX_QUEUES;
7717
7718 /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
7719 * something that vdev_ids can't reach so that we don't stop the queue
7720 * accidentally.
7721 */
7722 ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007723
Kalle Valo5cc7caf2014-12-17 12:20:54 +02007724 switch (ar->wmi.op_version) {
7725 case ATH10K_FW_WMI_OP_VERSION_MAIN:
Bartosz Markowskif2595092013-12-10 16:20:39 +01007726 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
7727 ar->hw->wiphy->n_iface_combinations =
7728 ARRAY_SIZE(ath10k_if_comb);
Michal Kaziorcf850d12014-07-24 20:07:00 +03007729 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
Kalle Valo5cc7caf2014-12-17 12:20:54 +02007730 break;
Michal Kaziorcf327842015-03-31 10:26:25 +00007731 case ATH10K_FW_WMI_OP_VERSION_TLV:
7732 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
7733 ar->hw->wiphy->iface_combinations =
7734 ath10k_tlv_qcs_if_comb;
7735 ar->hw->wiphy->n_iface_combinations =
7736 ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
7737 } else {
7738 ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
7739 ar->hw->wiphy->n_iface_combinations =
7740 ARRAY_SIZE(ath10k_tlv_if_comb);
7741 }
7742 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7743 break;
Kalle Valo5cc7caf2014-12-17 12:20:54 +02007744 case ATH10K_FW_WMI_OP_VERSION_10_1:
7745 case ATH10K_FW_WMI_OP_VERSION_10_2:
Rajkumar Manoharan4a16fbe2014-12-17 12:21:12 +02007746 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
Kalle Valo5cc7caf2014-12-17 12:20:54 +02007747 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
7748 ar->hw->wiphy->n_iface_combinations =
7749 ARRAY_SIZE(ath10k_10x_if_comb);
7750 break;
Raja Mani9bd21322015-06-22 20:10:09 +05307751 case ATH10K_FW_WMI_OP_VERSION_10_4:
Raja Manicf36fef2015-06-22 20:22:25 +05307752 ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
7753 ar->hw->wiphy->n_iface_combinations =
7754 ARRAY_SIZE(ath10k_10_4_if_comb);
Raja Mani9bd21322015-06-22 20:10:09 +05307755 break;
Kalle Valo5cc7caf2014-12-17 12:20:54 +02007756 case ATH10K_FW_WMI_OP_VERSION_UNSET:
7757 case ATH10K_FW_WMI_OP_VERSION_MAX:
7758 WARN_ON(1);
7759 ret = -EINVAL;
7760 goto err_free;
Bartosz Markowskif2595092013-12-10 16:20:39 +01007761 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03007762
David Liuccec9032015-07-24 20:25:32 +03007763 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7764 ar->hw->netdev_features = NETIF_F_HW_CSUM;
Michal Kazior7c199992013-07-31 10:47:57 +02007765
Janusz Dziedzic9702c682013-11-20 09:59:41 +02007766 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
7767 /* Init ath dfs pattern detector */
7768 ar->ath_common.debug_mask = ATH_DBG_DFS;
7769 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
7770 NL80211_DFS_UNSET);
7771
7772 if (!ar->dfs_detector)
Michal Kazior7aa7a722014-08-25 12:09:38 +02007773 ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
Janusz Dziedzic9702c682013-11-20 09:59:41 +02007774 }
7775
Kalle Valo5e3dd152013-06-12 20:52:10 +03007776 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
7777 ath10k_reg_notifier);
7778 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02007779 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
Jeff Johnson0e339442015-10-08 09:15:53 -07007780 goto err_dfs_detector_exit;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007781 }
7782
Johannes Berg3cb10942015-01-22 21:38:45 +01007783 ar->hw->wiphy->cipher_suites = cipher_suites;
7784 ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
7785
Kalle Valo5e3dd152013-06-12 20:52:10 +03007786 ret = ieee80211_register_hw(ar->hw);
7787 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02007788 ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
Jeff Johnson0e339442015-10-08 09:15:53 -07007789 goto err_dfs_detector_exit;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007790 }
7791
7792 if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
7793 ret = regulatory_hint(ar->hw->wiphy,
7794 ar->ath_common.regulatory.alpha2);
7795 if (ret)
Michal Kaziord6015b22013-07-22 14:13:30 +02007796 goto err_unregister;
Kalle Valo5e3dd152013-06-12 20:52:10 +03007797 }
7798
7799 return 0;
Michal Kaziord6015b22013-07-22 14:13:30 +02007800
7801err_unregister:
Kalle Valo5e3dd152013-06-12 20:52:10 +03007802 ieee80211_unregister_hw(ar->hw);
Jeff Johnson0e339442015-10-08 09:15:53 -07007803
7804err_dfs_detector_exit:
7805 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7806 ar->dfs_detector->exit(ar->dfs_detector);
7807
Michal Kaziord6015b22013-07-22 14:13:30 +02007808err_free:
7809 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
7810 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
7811
Jeff Johnson0e339442015-10-08 09:15:53 -07007812 SET_IEEE80211_DEV(ar->hw, NULL);
Kalle Valo5e3dd152013-06-12 20:52:10 +03007813 return ret;
7814}
7815
7816void ath10k_mac_unregister(struct ath10k *ar)
7817{
7818 ieee80211_unregister_hw(ar->hw);
7819
Janusz Dziedzic9702c682013-11-20 09:59:41 +02007820 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7821 ar->dfs_detector->exit(ar->dfs_detector);
7822
Kalle Valo5e3dd152013-06-12 20:52:10 +03007823 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
7824 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
7825
7826 SET_IEEE80211_DEV(ar->hw, NULL);
7827}