blob: ec9a8e7bae1de2934d9fddcd26e7d4b481cdafb9 [file] [log] [blame]
Johannes Berg8ca151b2013-01-24 14:25:36 +01001/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
Emmanuel Grumbach51368bf2013-12-30 13:15:54 +02008 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
Johannes Berg8b4139d2014-07-24 14:05:26 +02009 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010010 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
Emmanuel Grumbach410dc5a2013-02-18 09:22:28 +020026 * in the file called COPYING.
Johannes Berg8ca151b2013-01-24 14:25:36 +010027 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
Emmanuel Grumbach51368bf2013-12-30 13:15:54 +020034 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
Johannes Berg8b4139d2014-07-24 14:05:26 +020035 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010036 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/etherdevice.h>
67#include <net/mac80211.h>
68
69#include "mvm.h"
70#include "iwl-eeprom-parse.h"
71#include "fw-api-scan.h"
72
#define IWL_PLCP_QUIET_THRESH 1		/* PLCP-error threshold for "quiet" channel check (FW units) */
#define IWL_ACTIVE_QUIET_TIME 10	/* quiet time on active channels — presumably TUs, confirm vs FW API */

/*
 * Per-request scan timing parameters, computed by
 * iwl_mvm_scan_calc_params() before the FW scan command is built.
 */
struct iwl_mvm_scan_params {
	u32 max_out_time;	/* max contiguous time away from the operating channel */
	u32 suspend_time;	/* time to spend back on the operating channel between bursts */
	bool passive_fragmented;	/* fragment passive dwells (low-latency + FRAGMENTED_SCAN FW) */
	struct _dwell {
		u16 passive;	/* passive dwell time for this band */
		u16 active;	/* active dwell time for this band */
	} dwell[IEEE80211_NUM_BANDS];
};
Johannes Berg8ca151b2013-01-24 14:25:36 +010085
/*
 * Bitmask classifying UMAC scan UIDs: regular (one-shot) scans,
 * scheduled scans, or both (used when stopping all scans at once).
 */
enum iwl_umac_scan_uid_type {
	IWL_UMAC_SCAN_UID_REG_SCAN	= BIT(0),
	IWL_UMAC_SCAN_UID_SCHED_SCAN	= BIT(1),
	IWL_UMAC_SCAN_UID_ALL		= IWL_UMAC_SCAN_UID_REG_SCAN |
					  IWL_UMAC_SCAN_UID_SCHED_SCAN,
};

/* Forward declaration — defined later in this file. */
static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
			      enum iwl_umac_scan_uid_type type, bool notify);
95
96static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
97{
98 if (mvm->scan_rx_ant != ANT_NONE)
99 return mvm->scan_rx_ant;
100 return mvm->fw->valid_rx_ant;
101}
102
Johannes Berg8ca151b2013-01-24 14:25:36 +0100103static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
104{
105 u16 rx_chain;
Oren Givon91b05d12013-08-19 08:36:48 +0300106 u8 rx_ant;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100107
David Spinadeld2496222014-05-20 12:46:37 +0300108 rx_ant = iwl_mvm_scan_rx_ant(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100109 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
110 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
111 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
112 rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
113 return cpu_to_le16(rx_chain);
114}
115
David Spinadelfb98be52014-05-04 12:51:10 +0300116static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100117{
David Spinadelfb98be52014-05-04 12:51:10 +0300118 if (band == IEEE80211_BAND_2GHZ)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100119 return cpu_to_le32(PHY_BAND_24);
120 else
121 return cpu_to_le32(PHY_BAND_5);
122}
123
124static inline __le32
125iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
126 bool no_cck)
127{
128 u32 tx_ant;
129
130 mvm->scan_last_antenna_idx =
Johannes Berg4ed735e2014-02-12 21:47:44 +0100131 iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
Johannes Berg8ca151b2013-01-24 14:25:36 +0100132 mvm->scan_last_antenna_idx);
133 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
134
135 if (band == IEEE80211_BAND_2GHZ && !no_cck)
136 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
137 tx_ant);
138 else
139 return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
140}
141
/*
 * We insert the SSIDs in an inverted order, because the FW will
 * invert it back. The most prioritized SSID, which is first in the
 * request list, is not copied here, but inserted directly to the probe
 * request.
 *
 * @cmd_ssid: destination array of FW SSID IEs
 * @ssids/@n_ssids: the cfg80211 request's SSID list
 * @first: index of the first request SSID to copy (1 skips the one that
 *	went into the probe template, 0 copies all)
 */
static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
				    struct cfg80211_ssid *ssids,
				    int n_ssids, int first)
{
	int fw_idx, req_idx;

	/* walk the request list backwards, filling the FW list forwards */
	for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
	     req_idx--, fw_idx++) {
		cmd_ssid[fw_idx].id = WLAN_EID_SSID;
		cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
		memcpy(cmd_ssid[fw_idx].ssid,
		       ssids[req_idx].ssid,
		       ssids[req_idx].ssid_len);
	}
}
163
/*
 * If req->n_ssids > 0, it means we should do an active scan.
 * In case of an active scan w/o directed scan, we receive a zero-length SSID
 * just to notify that this scan is active and not passive.
 * In order to notify the FW of the number of SSIDs we wish to scan (including
 * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first). The first SSID is
 * already included in the probe template, so we need to set only
 * req->n_ssids - 1 bits in addition to the first bit.
 */
David Spinadel720daf22014-12-29 15:43:48 +0200174static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
175 enum ieee80211_band band, int n_ssids)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100176{
David Spinadel720daf22014-12-29 15:43:48 +0200177 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
178 return 10;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100179 if (band == IEEE80211_BAND_2GHZ)
David Spinadel39745332014-09-10 16:40:41 +0300180 return 20 + 3 * (n_ssids + 1);
181 return 10 + 2 * (n_ssids + 1);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100182}
183
David Spinadel720daf22014-12-29 15:43:48 +0200184static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
185 enum ieee80211_band band)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100186{
David Spinadel720daf22014-12-29 15:43:48 +0200187 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
188 return 110;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100189 return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
190}
191
/*
 * Append one iwl_scan_channel entry per requested channel to the scan
 * command. The entries live directly after the probe-request frame
 * (cmd->data + tx_cmd.len). "type" carries one bit per SSID to probe
 * actively (plus the extra basic-SSID bit when the FW has no implicit
 * basic SSID); the active bit is stripped for NO_IR channels.
 */
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
				       struct cfg80211_scan_request *req,
				       bool basic_ssid,
				       struct iwl_mvm_scan_params *params)
{
	struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
		(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
	int i;
	int type = BIT(req->n_ssids) - 1;
	/*
	 * NOTE(review): dwell times are looked up for the FIRST channel's
	 * band only, even if the request spans both bands — confirm this
	 * is intended (single-band requests at this layer?).
	 */
	enum ieee80211_band band = req->channels[0]->band;

	if (!basic_ssid)
		type |= BIT(req->n_ssids);

	for (i = 0; i < cmd->channel_count; i++) {
		chan->channel = cpu_to_le16(req->channels[i]->hw_value);
		chan->type = cpu_to_le32(type);
		/* NO_IR: we may not transmit, keep the channel passive */
		if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
			chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = cpu_to_le16(params->dwell[band].active);
		chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
		chan->iteration_count = cpu_to_le16(1);
		chan++;
	}
}
217
/*
 * Fill in probe request with the following parameters:
 * TA is our vif HW address, which mac80211 ensures we have.
 * Packet is broadcasted, so this is both SA and DA.
 * The probe request IE is made out of two: first comes the most prioritized
 * SSID if a directed scan is requested. Second comes whatever extra
 * information was given to us as the scan request IE.
 *
 * Returns the total frame length in bytes, or 0 when @left (the space
 * available in the command buffer) cannot hold the frame.
 */
static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
				  int n_ssids, const u8 *ssid, int ssid_len,
				  const u8 *band_ie, int band_ie_len,
				  const u8 *common_ie, int common_ie_len,
				  int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;	/* 802.11 management header */
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;	/* FW/HW fills the real sequence number */

	len += 24;

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (u16)len;

	/* points to the payload of the request */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our SSID IE */
	left -= ssid_len + 2;	/* 2 = IE id + length octets */
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/* callers size the buffer via iwl_mvm_max_scan_ie_fw_cmd_room() */
	if (WARN_ON(left < band_ie_len + common_ie_len))
		return len;

	/* band-specific IEs first, then the IEs common to both bands */
	if (band_ie && band_ie_len) {
		memcpy(pos, band_ie, band_ie_len);
		pos += band_ie_len;
		len += band_ie_len;
	}

	if (common_ie && common_ie_len) {
		memcpy(pos, common_ie, common_ie_len);
		pos += common_ie_len;
		len += common_ie_len;
	}

	return (u16)len;
}
286
Alexander Bondar8a110d92014-03-12 17:31:19 +0200287static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
288 struct ieee80211_vif *vif)
Haim Dreyfuss61f63252013-11-03 23:02:59 +0200289{
Alexander Bondar8a110d92014-03-12 17:31:19 +0200290 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
291 bool *global_bound = data;
Haim Dreyfuss61f63252013-11-03 23:02:59 +0200292
David Spinadel1e2ebe02014-10-26 15:53:27 +0200293 if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
294 mvmvif->phy_ctxt->id < MAX_PHYS)
Alexander Bondar8a110d92014-03-12 17:31:19 +0200295 *global_bound = true;
296}
297
/*
 * Compute scan timing for this request: out-of-channel/suspend budget
 * (only when some vif is bound to a channel), optional fragmented
 * passive dwells for low-latency + FRAGMENTED_SCAN firmware, and the
 * per-band active/passive dwell table.
 * Time values are FW units — presumably TUs, confirm against FW API.
 */
static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     int n_ssids, u32 flags,
				     struct iwl_mvm_scan_params *params)
{
	bool global_bound = false;
	enum ieee80211_band band;
	u8 frag_passive_dwell = 0;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_scan_condition_iterator,
						   &global_bound);

	/* unbound: no operating channel to protect, keep defaults (0) */
	if (!global_bound)
		goto not_bound;

	params->suspend_time = 30;
	params->max_out_time = 170;

	if (iwl_mvm_low_latency(mvm)) {
		if (mvm->fw->ucode_capa.api[0] &
		    IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
			/* fragment dwells so we return to the channel often */
			params->suspend_time = 105;
			params->max_out_time = 70;
			frag_passive_dwell = 20;
		} else {
			params->suspend_time = 120;
			params->max_out_time = 120;
		}
	}

	if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
				   IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
		/*
		 * P2P device scan should not be fragmented to avoid negative
		 * impact on P2P device discovery. Configure max_out_time to be
		 * equal to dwell time on passive channel. Take a longest
		 * possible value, one that corresponds to 2GHz band
		 */
		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			u32 passive_dwell =
				iwl_mvm_get_passive_dwell(mvm,
							  IEEE80211_BAND_2GHZ);
			params->max_out_time = passive_dwell;
		} else {
			params->passive_fragmented = true;
		}
	}

	if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
		params->max_out_time = 200;

not_bound:

	/* dwell table is filled for every band, bound or not */
	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
		if (params->passive_fragmented)
			params->dwell[band].passive = frag_passive_dwell;
		else
			params->dwell[band].passive =
				iwl_mvm_get_passive_dwell(mvm, band);
		params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
								      n_ssids);
	}
}
363
Andrei Otcheretianski73897bd2014-07-09 18:59:14 +0300364static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
365{
366 /* require rrm scan whenever the fw supports it */
367 return mvm->fw->ucode_capa.capa[0] &
368 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
369}
370
/*
 * Room left in the FW scan command for caller-supplied probe-request
 * IEs, after the 802.11 header, SSID IE, optional basic SSID and the
 * driver-added DS parameter set element are accounted for.
 */
static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
					   bool is_sched_scan)
{
	int max_probe_len;

	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
		max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
	else
		max_probe_len = mvm->fw->ucode_capa.max_probe_length;

	/* we create the 802.11 header and SSID element */
	max_probe_len -= 24 + 2;

	/* basic ssid is added only for hw_scan with an old api */
	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) &&
	    !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) &&
	    !is_sched_scan)
		max_probe_len -= 32;	/* max SSID IE: 2 + 30? — 32 bytes reserved */

	/* DS parameter set element is added on 2.4GHZ band if required */
	if (iwl_mvm_rrm_scan_needed(mvm))
		max_probe_len -= 3;

	return max_probe_len;
}
396
/*
 * Maximum scan-IE length to advertise to mac80211. See the TODO below:
 * for LMAC scan the reported value is knowingly too large.
 */
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
{
	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);

	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN))
		return max_ie_len;

	/* TODO: [BUG] This function should return the maximum allowed size of
	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
	 * in the same command. So the correct implementation of this function
	 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
	 * command has only 512 bytes and it would leave us with about 240
	 * bytes for scan IEs, which is clearly not enough. So meanwhile
	 * we will report an incorrect value. This may result in a failure to
	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
	 * functions with -ENOBUFS, if a large enough probe will be provided.
	 */
	return max_ie_len;
}
416
/*
 * Build and send a legacy SCAN_REQUEST_CMD for a mac80211 one-shot scan.
 * Uses the preallocated mvm->scan_cmd buffer (NOCOPY). On success the
 * scan state moves to IWL_MVM_SCAN_OS; on failure it is rolled back to
 * IWL_MVM_SCAN_NONE and -EIO is returned. Caller holds mvm->mutex.
 */
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
			 struct ieee80211_vif *vif,
			 struct cfg80211_scan_request *req)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_cmd *cmd = mvm->scan_cmd;
	int ret;
	u32 status;
	int ssid_len = 0;
	u8 *ssid = NULL;
	/* old FW expects the first SSID inside the probe template itself */
	bool basic_ssid = !(mvm->fw->ucode_capa.flags &
			    IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
	struct iwl_mvm_scan_params params = {};

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
	mvm->scan_status = IWL_MVM_SCAN_OS;
	/* clear the whole allocation, not just sizeof(*cmd) */
	memset(cmd, 0, ksize(cmd));

	cmd->channel_count = (u8)req->n_channels;
	cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, &params);
	cmd->max_out_time = cpu_to_le32(params.max_out_time);
	cmd->suspend_time = cpu_to_le32(params.suspend_time);
	if (params.passive_fragmented)
		cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;

	cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
		cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
	else
		cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);

	cmd->repeats = cpu_to_le32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (req->n_ssids > 0) {
		cmd->passive2active = cpu_to_le16(1);
		cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
		if (basic_ssid) {
			/* first SSID goes into the probe template below */
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids,
				basic_ssid ? 1 : 0);

	cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
					   3 << TX_CMD_FLG_BT_PRIO_POS);

	cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
	cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags =
			iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
						  req->no_cck);

	/* probe template must be written before the channel entries,
	 * since iwl_mvm_scan_fill_channels() places them after tx_cmd.len */
	cmd->tx_cmd.len =
		cpu_to_le16(iwl_mvm_fill_probe_req(
			    (struct ieee80211_mgmt *)cmd->data,
			    vif->addr,
			    req->n_ssids, ssid, ssid_len,
			    req->ie, req->ie_len, NULL, 0,
			    mvm->fw->ucode_capa.max_probe_length));

	iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);

	cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
		le16_to_cpu(cmd->tx_cmd.len) +
		(cmd->channel_count * sizeof(struct iwl_scan_channel)));
	hcmd.len[0] = le16_to_cpu(cmd->len);

	status = SCAN_RESPONSE_OK;
	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
	if (!ret && status == SCAN_RESPONSE_OK) {
		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
			status, ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ret = -EIO;
	}
	return ret;
}
528
529int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
530 struct iwl_device_cmd *cmd)
531{
532 struct iwl_rx_packet *pkt = rxb_addr(rxb);
533 struct iwl_cmd_response *resp = (void *)pkt->data;
534
535 IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
536 le32_to_cpu(resp->status));
537 return 0;
538}
539
/*
 * Notification handler for SCAN_COMPLETE_NOTIFICATION (legacy API):
 * clears the OS-scan state, tells mac80211 the scan finished (aborted
 * when FW status != OK) and drops the scan runtime-PM reference.
 */
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scan_complete_notif *notif = (void *)pkt->data;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
		       notif->status, notif->scanned_channels);

	if (mvm->scan_status == IWL_MVM_SCAN_OS)
		mvm->scan_status = IWL_MVM_SCAN_NONE;
	ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);

	iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);

	return 0;
}
559
/*
 * Notification handler for offloaded scan results. On the oldest API
 * (neither UMAC nor LMAC scan) the notification carries a client
 * bitmap, and only sched-scan clients are forwarded to mac80211.
 */
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
				    struct iwl_rx_cmd_buffer *rxb,
				    struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
	    !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
		struct iwl_sched_scan_results *notif = (void *)pkt->data;

		/* results not meant for the sched-scan client: ignore */
		if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
			return 0;
	}

	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
	ieee80211_sched_scan_results(mvm->hw);

	return 0;
}
579
/*
 * notif_wait callback used while aborting a scan. Returns true when
 * waiting may stop (abort refused, or scan-complete arrived), false to
 * keep waiting (abort accepted — completion notification still pending).
 */
static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
				     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_scan_complete_notif *notif;
	u32 *resp;

	switch (pkt->hdr.cmd) {
	case SCAN_ABORT_CMD:
		resp = (void *)pkt->data;
		if (*resp == CAN_ABORT_STATUS) {
			IWL_DEBUG_SCAN(mvm,
				       "Scan can be aborted, wait until completion\n");
			return false;
		}

		/*
		 * If scan cannot be aborted, it means that we had a
		 * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
		 * ieee80211_scan_completed already.
		 */
		IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
			       *resp);
		return true;

	case SCAN_COMPLETE_NOTIFICATION:
		notif = (void *)pkt->data;
		IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
			       notif->status);
		return true;

	default:
		/* we only registered for the two commands above */
		WARN_ON(1);
		return false;
	};
}
617
/*
 * Abort a running legacy (non-offloaded) scan: register a notification
 * wait for the abort response / completion, send SCAN_ABORT_CMD, and
 * wait up to HZ jiffies for iwl_mvm_scan_abort_notif() to release us.
 */
static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait wait_scan_abort;
	static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
					       SCAN_COMPLETE_NOTIFICATION };
	int ret;

	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
				   scan_abort_notif,
				   ARRAY_SIZE(scan_abort_notif),
				   iwl_mvm_scan_abort_notif, NULL);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
		/* mac80211's state will be cleaned in the nic_restart flow */
		goto out_remove_notif;
	}

	return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);

out_remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
	return ret;
}
David Spinadel35a000b2013-08-28 09:29:43 +0300643
/*
 * Notification handler for offloaded scan completion. The notification
 * layout differs between the LMAC API and the older offload API, but
 * both carry a scan status and an EBS (energy-based scan) status.
 * Completes the matching mac80211 scan/sched-scan and records whether
 * EBS worked so later scans can decide whether to use it.
 */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
					   struct iwl_rx_cmd_buffer *rxb,
					   struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u8 status, ebs_status;

	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
		struct iwl_periodic_scan_complete *scan_notif;

		scan_notif = (void *)pkt->data;
		status = scan_notif->status;
		ebs_status = scan_notif->ebs_status;
	} else {
		struct iwl_scan_offload_complete *scan_notif;

		scan_notif = (void *)pkt->data;
		status = scan_notif->status;
		ebs_status = scan_notif->ebs_status;
	}
	/* scan status must be locked for proper checking */
	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_SCAN(mvm,
		       "%s completed, status %s, EBS status %s\n",
		       mvm->scan_status == IWL_MVM_SCAN_SCHED ?
				"Scheduled scan" : "Scan",
		       status == IWL_SCAN_OFFLOAD_COMPLETED ?
				"completed" : "aborted",
		       ebs_status == IWL_SCAN_EBS_SUCCESS ?
				"success" : "failed");


	/* only call mac80211 completion if the stop was initiated by FW */
	if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ieee80211_sched_scan_stopped(mvm->hw);
	} else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ieee80211_scan_completed(mvm->hw,
					 status == IWL_SCAN_OFFLOAD_ABORTED);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
	}

	/* any non-zero ebs_status counts as failure */
	mvm->last_ebs_successful = !ebs_status;

	return 0;
}
692
/*
 * Build the TX command + probe-request template for an offloaded scan
 * on @band. The probe frame is written into @data with only a wildcard
 * (zero-length) SSID; per-band and common IEs come from @ies.
 */
static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct ieee80211_scan_ies *ies,
					  enum ieee80211_band band,
					  struct iwl_tx_cmd *cmd,
					  u8 *data)
{
	u16 cmd_len;

	cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
	cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	cmd->sta_id = mvm->aux_sta.sta_id;

	/* no_cck = false: CCK is fine for offloaded probes on 2.4 GHz */
	cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);

	/* n_ssids = 1 with a NULL SSID → wildcard SSID IE only */
	cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
					 vif->addr,
					 1, NULL, 0,
					 ies->ies[band], ies->len[band],
					 ies->common_ies, ies->common_ie_len,
					 SCAN_OFFLOAD_PROBE_REQ_SIZE);
	cmd->len = cpu_to_le16(cmd_len);
}
716
/*
 * Fill the fixed part of the offloaded (sched) scan command from the
 * cfg80211 request and the precomputed timing parameters.
 */
static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       struct cfg80211_sched_scan_request *req,
			       struct iwl_scan_offload_cmd *scan,
			       struct iwl_mvm_scan_params *params)
{
	scan->channel_count = req->n_channels;
	scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
	scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
	scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
	scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);

	scan->max_out_time = cpu_to_le32(params->max_out_time);
	scan->suspend_time = cpu_to_le32(params->suspend_time);

	scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					  MAC_FILTER_IN_BEACON);
	/* background scan: FW schedules iterations itself */
	scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
	scan->rep_count = cpu_to_le32(1);

	if (params->passive_fragmented)
		scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
}
740
741static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
742{
743 int i;
744
745 for (i = 0; i < PROBE_OPTION_MAX; i++) {
746 if (!ssid_list[i].len)
747 break;
748 if (ssid_list[i].len == ssid_len &&
749 !memcmp(ssid_list->ssid, ssid, ssid_len))
750 return i;
751 }
752 return -1;
753}
754
/*
 * Build the direct-scan SSID list for a scheduled scan and the bitmap of
 * SSIDs to probe per channel.
 *
 * SSIDs from the match sets are copied first (their index order is what
 * iwl_mvm_config_sched_scan_profiles() refers to via profile->ssid_index);
 * SSIDs explicitly requested for probing are then appended, unless already
 * present, and only those are marked in *ssid_bitmap.
 *
 * NOTE: bits in *ssid_bitmap are 1-based (BIT(index + 1)) — bit 0 appears
 * to be reserved for the broadcast probe; confirm against the firmware API.
 */
static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
					struct iwl_ssid_ie *direct_scan,
					u32 *ssid_bitmap, bool basic_ssid)
{
	int i, j;
	int index;

	/*
	 * copy SSIDs from match list.
	 * iwl_config_sched_scan_profiles() uses the order of these ssids to
	 * config match list.
	 */
	for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
		/* skip empty SSID matchsets */
		if (!req->match_sets[i].ssid.ssid_len)
			continue;
		direct_scan[i].id = WLAN_EID_SSID;
		direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
		memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
		       direct_scan[i].len);
	}

	/* add SSIDs from scan SSID list */
	*ssid_bitmap = 0;
	/* note: i continues from the loop above — new SSIDs are appended */
	for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
		index = iwl_ssid_exist(req->ssids[j].ssid,
				       req->ssids[j].ssid_len,
				       direct_scan);
		if (index < 0) {
			/* wildcard SSID is implicit when basic_ssid is set */
			if (!req->ssids[j].ssid_len && basic_ssid)
				continue;
			direct_scan[i].id = WLAN_EID_SSID;
			direct_scan[i].len = req->ssids[j].ssid_len;
			memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
			       direct_scan[i].len);
			*ssid_bitmap |= BIT(i + 1);
			i++;
		} else {
			*ssid_bitmap |= BIT(index + 1);
		}
	}
}
797
/*
 * Fill the per-channel configuration for one band into @channels_buffer.
 *
 * The buffer is laid out as a structure-of-arrays, each array sized for
 * the firmware's maximum channel count (n_scan_channels):
 *   __le32 type[]; __le16 channel_number[]; __le16 iter_count[];
 *   __le32 iter_interval[]; u8 active_dwell[]; u8 passive_dwell[];
 * @head is the next free slot, shared across the per-band calls so the
 * 5 GHz channels are appended after the 2.4 GHz ones.
 */
static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
				  struct cfg80211_sched_scan_request *req,
				  u8 *channels_buffer,
				  enum ieee80211_band band,
				  int *head,
				  u32 ssid_bitmap,
				  struct iwl_mvm_scan_params *params)
{
	u32 n_channels = mvm->fw->ucode_capa.n_scan_channels;
	/* carve the structure-of-arrays views out of the flat buffer */
	__le32 *type = (__le32 *)channels_buffer;
	__le16 *channel_number = (__le16 *)(type + n_channels);
	__le16 *iter_count = channel_number + n_channels;
	__le32 *iter_interval = (__le32 *)(iter_count + n_channels);
	u8 *active_dwell = (u8 *)(iter_interval + n_channels);
	u8 *passive_dwell = active_dwell + n_channels;
	int i, index = 0;

	for (i = 0; i < req->n_channels; i++) {
		struct ieee80211_channel *chan = req->channels[i];

		/* only channels of the requested band in this pass */
		if (chan->band != band)
			continue;

		index = *head;
		(*head)++;

		channel_number[index] = cpu_to_le16(chan->hw_value);
		active_dwell[index] = params->dwell[band].active;
		passive_dwell[index] = params->dwell[band].passive;

		iter_count[index] = cpu_to_le16(1);
		iter_interval[index] = 0;

		/* active scanning only where regulatory allows initiating */
		if (!(chan->flags & IEEE80211_CHAN_NO_IR))
			type[index] |=
				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);

		type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
					   IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);

		if (chan->flags & IEEE80211_CHAN_NO_HT40)
			type[index] |=
				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);

		/* scan for all SSIDs from req->ssids */
		type[index] |= cpu_to_le32(ssid_bitmap);
	}
}
846
/*
 * Build and send the SCAN_OFFLOAD_CONFIG_CMD for a scheduled scan:
 * generic scan command, direct-scan SSID list, per-band probe request
 * templates and per-band channel configuration.
 *
 * Returns 0 on success or a negative error (-ENOMEM if the command
 * buffer cannot be allocated, otherwise the iwl_mvm_send_cmd() result).
 * Must be called with mvm->mutex held.
 */
int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct cfg80211_sched_scan_request *req,
			      struct ieee80211_scan_ies *ies)
{
	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
	int head = 0;
	u32 ssid_bitmap;
	int cmd_len;
	int ret;
	u8 *probes;
	/* old firmware needs an explicit wildcard SSID entry */
	bool basic_ssid = !(mvm->fw->ucode_capa.flags &
			    IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	struct iwl_scan_offload_cfg *scan_cfg;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_CONFIG_CMD,
	};
	struct iwl_mvm_scan_params params = {};

	lockdep_assert_held(&mvm->mutex);

	/* header + channel config block + one probe template per band */
	cmd_len = sizeof(struct iwl_scan_offload_cfg) +
		  mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE +
		  2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;

	scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
	if (!scan_cfg)
		return -ENOMEM;

	/* probe request templates live right after the channel config */
	probes = scan_cfg->data +
		mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE;

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
	iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
	scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);

	iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan,
				    &ssid_bitmap, basic_ssid);
	/* build tx frames for supported bands */
	if (band_2ghz) {
		iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
					      IEEE80211_BAND_2GHZ,
					      &scan_cfg->scan_cmd.tx_cmd[0],
					      probes);
		iwl_build_channel_cfg(mvm, req, scan_cfg->data,
				      IEEE80211_BAND_2GHZ, &head,
				      ssid_bitmap, &params);
	}
	if (band_5ghz) {
		iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
					      IEEE80211_BAND_5GHZ,
					      &scan_cfg->scan_cmd.tx_cmd[1],
					      probes +
						SCAN_OFFLOAD_PROBE_REQ_SIZE);
		iwl_build_channel_cfg(mvm, req, scan_cfg->data,
				      IEEE80211_BAND_5GHZ, &head,
				      ssid_bitmap, &params);
	}

	cmd.data[0] = scan_cfg;
	cmd.len[0] = cmd_len;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;

	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(scan_cfg);
	return ret;
}
918
919int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
920 struct cfg80211_sched_scan_request *req)
921{
922 struct iwl_scan_offload_profile *profile;
923 struct iwl_scan_offload_profile_cfg *profile_cfg;
924 struct iwl_scan_offload_blacklist *blacklist;
925 struct iwl_host_cmd cmd = {
926 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
David Spinadel35a000b2013-08-28 09:29:43 +0300927 .len[1] = sizeof(*profile_cfg),
928 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
929 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
930 };
931 int blacklist_len;
932 int i;
933 int ret;
934
935 if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
936 return -EIO;
937
938 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
939 blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
940 else
941 blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
942
943 blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
944 if (!blacklist)
945 return -ENOMEM;
946
947 profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
948 if (!profile_cfg) {
949 ret = -ENOMEM;
950 goto free_blacklist;
951 }
952
953 cmd.data[0] = blacklist;
954 cmd.len[0] = sizeof(*blacklist) * blacklist_len;
955 cmd.data[1] = profile_cfg;
956
957 /* No blacklist configuration */
958
959 profile_cfg->num_profiles = req->n_match_sets;
960 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
961 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
962 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
David Spinadel6e0bbe52013-12-30 09:59:45 +0200963 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
964 profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
David Spinadel35a000b2013-08-28 09:29:43 +0300965
966 for (i = 0; i < req->n_match_sets; i++) {
967 profile = &profile_cfg->profiles[i];
968 profile->ssid_index = i;
969 /* Support any cipher and auth algorithm */
970 profile->unicast_cipher = 0xff;
971 profile->auth_alg = 0xff;
972 profile->network_type = IWL_NETWORK_TYPE_ANY;
973 profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
974 profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
975 }
976
977 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
978
979 ret = iwl_mvm_send_cmd(mvm, &cmd);
980 kfree(profile_cfg);
981free_blacklist:
982 kfree(blacklist);
983
984 return ret;
985}
986
David Spinadeld2496222014-05-20 12:46:37 +0300987static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
988 struct cfg80211_sched_scan_request *req)
989{
990 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
991 IWL_DEBUG_SCAN(mvm,
992 "Sending scheduled scan with filtering, n_match_sets %d\n",
993 req->n_match_sets);
994 return false;
995 }
996
997 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
998 return true;
999}
1000
David Spinadel35a000b2013-08-28 09:29:43 +03001001int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1002 struct cfg80211_sched_scan_request *req)
1003{
1004 struct iwl_scan_offload_req scan_req = {
1005 .watchdog = IWL_SCHED_SCAN_WATCHDOG,
1006
1007 .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
David Spinadelb14fc2b2014-06-25 13:17:53 +03001008 .schedule_line[0].delay = cpu_to_le16(req->interval / 1000),
David Spinadel35a000b2013-08-28 09:29:43 +03001009 .schedule_line[0].full_scan_mul = 1,
1010
1011 .schedule_line[1].iterations = 0xff,
David Spinadelb14fc2b2014-06-25 13:17:53 +03001012 .schedule_line[1].delay = cpu_to_le16(req->interval / 1000),
David Spinadel35a000b2013-08-28 09:29:43 +03001013 .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
1014 };
1015
David Spinadeld2496222014-05-20 12:46:37 +03001016 if (iwl_mvm_scan_pass_all(mvm, req))
Eliad Pellerde33fb52013-11-10 12:59:46 +02001017 scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
David Spinadel35a000b2013-08-28 09:29:43 +03001018
Haim Dreyfusse820c2d2014-04-06 11:19:09 +03001019 if (mvm->last_ebs_successful &&
1020 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
1021 scan_req.flags |=
1022 cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
1023
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001024 return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
David Spinadel35a000b2013-08-28 09:29:43 +03001025 sizeof(scan_req), &scan_req);
1026}
1027
Luciano Coelhob141c232014-10-01 13:22:40 +03001028int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
1029 struct ieee80211_vif *vif,
1030 struct cfg80211_sched_scan_request *req,
1031 struct ieee80211_scan_ies *ies)
1032{
1033 int ret;
1034
David Spinadelb975e552014-11-17 12:30:05 +02001035 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
1036 ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
1037 if (ret)
1038 return ret;
1039 ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
1040 } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
David Spinadeld2496222014-05-20 12:46:37 +03001041 mvm->scan_status = IWL_MVM_SCAN_SCHED;
Luciano Coelhob141c232014-10-01 13:22:40 +03001042 ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
1043 if (ret)
1044 return ret;
1045 ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
1046 } else {
David Spinadeld2496222014-05-20 12:46:37 +03001047 mvm->scan_status = IWL_MVM_SCAN_SCHED;
Luciano Coelhob141c232014-10-01 13:22:40 +03001048 ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
1049 if (ret)
1050 return ret;
1051 ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
1052 if (ret)
1053 return ret;
1054 ret = iwl_mvm_sched_scan_start(mvm, req);
1055 }
1056
1057 return ret;
1058}
1059
David Spinadelfb98be52014-05-04 12:51:10 +03001060static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
David Spinadel35a000b2013-08-28 09:29:43 +03001061{
1062 int ret;
1063 struct iwl_host_cmd cmd = {
1064 .id = SCAN_OFFLOAD_ABORT_CMD,
David Spinadel35a000b2013-08-28 09:29:43 +03001065 };
1066 u32 status;
1067
1068 /* Exit instantly with error when device is not ready
1069 * to receive scan abort command or it does not perform
1070 * scheduled scan currently */
David Spinadelfb98be52014-05-04 12:51:10 +03001071 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
1072 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
1073 mvm->scan_status != IWL_MVM_SCAN_OS))
David Spinadel35a000b2013-08-28 09:29:43 +03001074 return -EIO;
1075
1076 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
1077 if (ret)
1078 return ret;
1079
1080 if (status != CAN_ABORT_STATUS) {
1081 /*
1082 * The scan abort will return 1 for success or
1083 * 2 for "failure". A failure condition can be
1084 * due to simply not being in an active scan which
1085 * can occur if we send the scan abort before the
1086 * microcode has notified us that a scan is completed.
1087 */
1088 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001089 ret = -ENOENT;
David Spinadel35a000b2013-08-28 09:29:43 +03001090 }
1091
1092 return ret;
1093}
1094
David Spinadelfb98be52014-05-04 12:51:10 +03001095int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
David Spinadel35a000b2013-08-28 09:29:43 +03001096{
1097 int ret;
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001098 struct iwl_notification_wait wait_scan_done;
1099 static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
David Spinadelfb98be52014-05-04 12:51:10 +03001100 bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;
David Spinadel35a000b2013-08-28 09:29:43 +03001101
1102 lockdep_assert_held(&mvm->mutex);
1103
David Spinadeld2496222014-05-20 12:46:37 +03001104 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
1105 return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
1106 notify);
1107
David Spinadelfb98be52014-05-04 12:51:10 +03001108 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
1109 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
1110 mvm->scan_status != IWL_MVM_SCAN_OS)) {
1111 IWL_DEBUG_SCAN(mvm, "No scan to stop\n");
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001112 return 0;
David Spinadel35a000b2013-08-28 09:29:43 +03001113 }
1114
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001115 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
1116 scan_done_notif,
1117 ARRAY_SIZE(scan_done_notif),
1118 NULL, NULL);
1119
David Spinadelfb98be52014-05-04 12:51:10 +03001120 ret = iwl_mvm_send_scan_offload_abort(mvm);
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001121 if (ret) {
David Spinadelfb98be52014-05-04 12:51:10 +03001122 IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
1123 sched ? "offloaded " : "", ret);
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001124 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
1125 return ret;
1126 }
1127
David Spinadelfb98be52014-05-04 12:51:10 +03001128 IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
1129 sched ? "offloaded " : "");
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001130
1131 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
1132 if (ret)
1133 return ret;
1134
1135 /*
1136 * Clear the scan status so the next scan requests will succeed. This
1137 * also ensures the Rx handler doesn't do anything, as the scan was
Eliad Peller4ff78182014-06-22 14:44:44 +03001138 * stopped from above. Since the rx handler won't do anything now,
1139 * we have to release the scan reference here.
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001140 */
Eliad Peller4ff78182014-06-22 14:44:44 +03001141 if (mvm->scan_status == IWL_MVM_SCAN_OS)
1142 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1143
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001144 mvm->scan_status = IWL_MVM_SCAN_NONE;
1145
David Spinadelfb98be52014-05-04 12:51:10 +03001146 if (notify) {
1147 if (sched)
1148 ieee80211_sched_scan_stopped(mvm->hw);
1149 else
1150 ieee80211_scan_completed(mvm->hw, true);
1151 }
David Spinadel636a2cd2014-05-01 15:57:22 +03001152
Arik Nemtsov33ea27f2014-02-10 15:34:29 +02001153 return 0;
David Spinadel35a000b2013-08-28 09:29:43 +03001154}
David Spinadelfb98be52014-05-04 12:51:10 +03001155
1156static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
1157 struct iwl_scan_req_tx_cmd *tx_cmd,
1158 bool no_cck)
1159{
1160 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
1161 TX_CMD_FLG_BT_DIS);
1162 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
1163 IEEE80211_BAND_2GHZ,
1164 no_cck);
1165 tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
1166
1167 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
1168 TX_CMD_FLG_BT_DIS);
1169 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
1170 IEEE80211_BAND_5GHZ,
1171 no_cck);
1172 tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
1173}
1174
1175static void
1176iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
1177 struct ieee80211_channel **channels,
1178 int n_channels, u32 ssid_bitmap,
1179 struct iwl_scan_req_unified_lmac *cmd)
1180{
1181 struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
1182 int i;
1183
1184 for (i = 0; i < n_channels; i++) {
1185 channel_cfg[i].channel_num =
1186 cpu_to_le16(channels[i]->hw_value);
1187 channel_cfg[i].iter_count = cpu_to_le16(1);
1188 channel_cfg[i].iter_interval = 0;
1189 channel_cfg[i].flags =
1190 cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
1191 ssid_bitmap);
1192 }
1193}
1194
Andrei Otcheretianski66dc5272014-09-02 17:55:40 +03001195static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
1196 size_t len, u8 *const pos)
1197{
1198 static const u8 before_ds_params[] = {
1199 WLAN_EID_SSID,
1200 WLAN_EID_SUPP_RATES,
1201 WLAN_EID_REQUEST,
1202 WLAN_EID_EXT_SUPP_RATES,
1203 };
1204 size_t offs;
1205 u8 *newpos = pos;
1206
1207 if (!iwl_mvm_rrm_scan_needed(mvm)) {
1208 memcpy(newpos, ies, len);
1209 return newpos + len;
1210 }
1211
1212 offs = ieee80211_ie_split(ies, len,
1213 before_ds_params,
1214 ARRAY_SIZE(before_ds_params),
1215 0);
1216
1217 memcpy(newpos, ies, offs);
1218 newpos += offs;
1219
1220 /* Add a placeholder for DS Parameter Set element */
1221 *newpos++ = WLAN_EID_DS_PARAMS;
1222 *newpos++ = 1;
1223 *newpos++ = 0;
1224
1225 memcpy(newpos, ies + offs, len - offs);
1226 newpos += len - offs;
1227
1228 return newpos;
1229}
1230
David Spinadelfb98be52014-05-04 12:51:10 +03001231static void
1232iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1233 struct ieee80211_scan_ies *ies,
Johannes Bergeffd05a2014-11-18 17:21:19 +01001234 struct iwl_scan_probe_req *preq,
1235 const u8 *mac_addr, const u8 *mac_addr_mask)
David Spinadelfb98be52014-05-04 12:51:10 +03001236{
David Spinadelfb98be52014-05-04 12:51:10 +03001237 struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
Andrei Otcheretianski66dc5272014-09-02 17:55:40 +03001238 u8 *pos, *newpos;
David Spinadelfb98be52014-05-04 12:51:10 +03001239
Johannes Bergeffd05a2014-11-18 17:21:19 +01001240 /*
1241 * Unfortunately, right now the offload scan doesn't support randomising
1242 * within the firmware, so until the firmware API is ready we implement
1243 * it in the driver. This means that the scan iterations won't really be
1244 * random, only when it's restarted, but at least that helps a bit.
1245 */
1246 if (mac_addr)
1247 get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
1248 else
1249 memcpy(frame->sa, vif->addr, ETH_ALEN);
1250
David Spinadelfb98be52014-05-04 12:51:10 +03001251 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1252 eth_broadcast_addr(frame->da);
David Spinadelfb98be52014-05-04 12:51:10 +03001253 eth_broadcast_addr(frame->bssid);
1254 frame->seq_ctrl = 0;
1255
1256 pos = frame->u.probe_req.variable;
1257 *pos++ = WLAN_EID_SSID;
1258 *pos++ = 0;
1259
1260 preq->mac_header.offset = 0;
1261 preq->mac_header.len = cpu_to_le16(24 + 2);
1262
Andrei Otcheretianski66dc5272014-09-02 17:55:40 +03001263 /* Insert ds parameter set element on 2.4 GHz band */
1264 newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
1265 ies->ies[IEEE80211_BAND_2GHZ],
1266 ies->len[IEEE80211_BAND_2GHZ],
1267 pos);
David Spinadelfb98be52014-05-04 12:51:10 +03001268 preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
Andrei Otcheretianski66dc5272014-09-02 17:55:40 +03001269 preq->band_data[0].len = cpu_to_le16(newpos - pos);
1270 pos = newpos;
David Spinadelfb98be52014-05-04 12:51:10 +03001271
1272 memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
1273 ies->len[IEEE80211_BAND_5GHZ]);
1274 preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
1275 preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
1276 pos += ies->len[IEEE80211_BAND_5GHZ];
1277
1278 memcpy(pos, ies->common_ies, ies->common_ie_len);
1279 preq->common_data.offset = cpu_to_le16(pos - preq->buf);
1280 preq->common_data.len = cpu_to_le16(ies->common_ie_len);
1281}
1282
/*
 * Fill the parts of the unified LMAC scan command that are common to
 * regular and scheduled scans: dwell times, timing, RX chain, priority
 * and (when available) the EBS channel optimisation flags.
 */
static void
iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
				       struct iwl_scan_req_unified_lmac *cmd,
				       struct iwl_mvm_scan_params *params)
{
	/*
	 * clear the whole allocation (ksize gives the usable size of the
	 * kmalloc'ed scan_cmd buffer, which extends past *cmd itself)
	 */
	memset(cmd, 0, ksize(cmd));
	/* dwell values are band-independent here; 2.4 GHz entry is used */
	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
	if (params->passive_fragmented)
		cmd->fragmented_dwell =
			params->dwell[IEEE80211_BAND_2GHZ].passive;
	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
	cmd->max_out_time = cpu_to_le32(params->max_out_time);
	cmd->suspend_time = cpu_to_le32(params->suspend_time);
	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
	cmd->iter_num = cpu_to_le32(1);

	/* enable EBS only if supported and the previous attempt worked */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
	    mvm->last_ebs_successful) {
		cmd->channel_opt[0].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[1].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
	}

	if (iwl_mvm_rrm_scan_needed(mvm))
		cmd->scan_flags |=
			cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
}
1316
/*
 * Build and send a regular (one-shot) scan request using the unified
 * LMAC scan API. The command buffer (mvm->scan_cmd) holds the fixed
 * command, followed by the per-channel config array, followed by the
 * probe request template.
 *
 * Returns 0 on success, -ENOBUFS if the request exceeds the firmware's
 * capacity, -ENOMEM/-EIO on other failures. Must be called with
 * mvm->mutex held.
 */
int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct ieee80211_scan_request *req)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_OFFLOAD_REQUEST_CMD,
		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
			 sizeof(struct iwl_scan_channel_cfg_lmac) *
				mvm->fw->ucode_capa.n_scan_channels +
			 sizeof(struct iwl_scan_probe_req), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
	struct iwl_scan_probe_req *preq;
	struct iwl_mvm_scan_params params = {};
	u32 flags;
	u32 ssid_bitmap = 0;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	/* reject requests that don't fit the firmware's limits */
	if (req->req.n_ssids > PROBE_OPTION_MAX ||
	    req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] +
	    req->ies.len[NL80211_BAND_5GHZ] >
		iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) ||
	    req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
		return -ENOBUFS;

	mvm->scan_status = IWL_MVM_SCAN_OS;

	iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
				 &params);

	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);

	cmd->n_channels = (u8)req->req.n_channels;

	flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;

	if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;

	if (params.passive_fragmented)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;

	/* no SSIDs to probe for means a passive scan */
	if (req->req.n_ssids == 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;

	cmd->scan_flags |= cpu_to_le32(flags);

	cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
	iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
				req->req.n_ssids, 0);

	/* one-shot: single iteration on schedule 0, schedule 1 unused */
	cmd->schedule[0].delay = 0;
	cmd->schedule[0].iterations = 1;
	cmd->schedule[0].full_scan_mul = 0;
	cmd->schedule[1].delay = 0;
	cmd->schedule[1].iterations = 0;
	cmd->schedule[1].full_scan_mul = 0;

	/* probe every requested SSID on every channel (1-based bits) */
	for (i = 1; i <= req->req.n_ssids; i++)
		ssid_bitmap |= BIT(i);

	iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
				       req->req.n_channels, ssid_bitmap,
				       cmd);

	/* probe template follows the channel config in the buffer */
	preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
			mvm->fw->ucode_capa.n_scan_channels);

	iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
		req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->req.mac_addr : NULL,
		req->req.mac_addr_mask);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ret = -EIO;
	}
	return ret;
}
1416
/*
 * Build and send a scheduled scan request using the unified LMAC scan
 * API. Same command layout as iwl_mvm_unified_scan_lmac(), but the SSID
 * list comes from the sched-scan match sets and both schedule lines are
 * used (a few fast iterations, then indefinite repetition).
 *
 * Returns 0 on success, -ENOBUFS if the request exceeds the firmware's
 * capacity, -ENOMEM/-EIO on other failures. Must be called with
 * mvm->mutex held.
 */
int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    struct cfg80211_sched_scan_request *req,
				    struct ieee80211_scan_ies *ies)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_OFFLOAD_REQUEST_CMD,
		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
			 sizeof(struct iwl_scan_channel_cfg_lmac) *
				mvm->fw->ucode_capa.n_scan_channels +
			 sizeof(struct iwl_scan_probe_req), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
	struct iwl_scan_probe_req *preq;
	struct iwl_mvm_scan_params params = {};
	int ret;
	u32 flags = 0, ssid_bitmap = 0;

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	/* reject requests that don't fit the firmware's limits */
	if (req->n_ssids > PROBE_OPTION_MAX ||
	    ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
	    ies->len[NL80211_BAND_5GHZ] >
		iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) ||
	    req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
		return -ENOBUFS;

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);

	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);

	cmd->n_channels = (u8)req->n_channels;

	/* pass all results up, or let firmware filter via match sets */
	if (iwl_mvm_scan_pass_all(mvm, req))
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
	else
		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;

	if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;

	if (params.passive_fragmented)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;

	/* no SSIDs to probe for means a passive scan */
	if (req->n_ssids == 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;

	cmd->scan_flags |= cpu_to_le32(flags);

	cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
	iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);

	/* schedule 0: a few fast iterations; schedule 1: run forever */
	cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
	cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
	cmd->schedule[0].full_scan_mul = 1;

	cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
	cmd->schedule[1].iterations = 0xff;
	cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;

	iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
				       ssid_bitmap, cmd);

	/* probe template follows the channel config in the buffer */
	preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
			mvm->fw->ucode_capa.n_scan_channels);

	iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
		req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->mac_addr : NULL,
		req->mac_addr_mask);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm,
			       "Sched scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ret = -EIO;
	}
	return ret;
}
1513
1514
/*
 * Cancel any regular (non-scheduled) scan that is currently running.
 *
 * Dispatches to the right stop flow depending on the firmware's scan API
 * (UMAC vs. LMAC vs. legacy) and on the current driver/radio state.
 * Returns 0 on success or if no scan was running, negative error otherwise.
 * Note the check order matters: UMAC capability first, then scan state,
 * then rfkill, then the LMAC/legacy split.
 */
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
	/* UMAC firmware tracks scans by uid; use the UMAC stop flow */
	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
					  true);

	/* nothing to cancel */
	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
		return 0;

	/*
	 * If the radio is killed the firmware won't answer an abort
	 * command; complete the scan locally and drop our reference.
	 */
	if (iwl_mvm_is_radio_killed(mvm)) {
		ieee80211_scan_completed(mvm->hw, true);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		return 0;
	}

	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
		return iwl_mvm_scan_offload_stop(mvm, true);
	return iwl_mvm_cancel_regular_scan(mvm);
}
David Spinadeld2496222014-05-20 12:46:37 +03001535
1536/* UMAC scan API */
1537
/*
 * Context handed to the scan-complete notification-wait callback
 * (iwl_scan_umac_done_check) while stopping UMAC scans of a given type.
 */
struct iwl_umac_scan_done {
	struct iwl_mvm *mvm;			/* driver context */
	enum iwl_umac_scan_uid_type type;	/* scan type(s) being stopped */
};
1542
1543static int rate_to_scan_rate_flag(unsigned int rate)
1544{
1545 static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
1546 [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
1547 [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
1548 [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
1549 [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
1550 [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
1551 [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
1552 [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
1553 [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
1554 [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
1555 [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
1556 [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
1557 [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
1558 };
1559
1560 return rate_to_scan_rate[rate];
1561}
1562
1563static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
1564{
1565 struct ieee80211_supported_band *band;
1566 unsigned int rates = 0;
1567 int i;
1568
1569 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
1570 for (i = 0; i < band->n_bitrates; i++)
1571 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1572 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
1573 for (i = 0; i < band->n_bitrates; i++)
1574 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1575
1576 /* Set both basic rates and supported rates */
1577 rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
1578
1579 return cpu_to_le32(rates);
1580}
1581
1582int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1583{
1584
1585 struct iwl_scan_config *scan_config;
1586 struct ieee80211_supported_band *band;
1587 int num_channels =
1588 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
1589 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
1590 int ret, i, j = 0, cmd_size, data_size;
1591 struct iwl_host_cmd cmd = {
1592 .id = SCAN_CFG_CMD,
1593 };
1594
1595 if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
1596 return -ENOBUFS;
1597
1598 cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
1599
1600 scan_config = kzalloc(cmd_size, GFP_KERNEL);
1601 if (!scan_config)
1602 return -ENOMEM;
1603
1604 data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
1605 scan_config->hdr.size = cpu_to_le16(data_size);
1606 scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
1607 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
1608 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
1609 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
1610 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
1611 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
1612 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
1613 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
1614 SCAN_CONFIG_N_CHANNELS(num_channels));
1615 scan_config->tx_chains = cpu_to_le32(mvm->fw->valid_tx_ant);
1616 scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1617 scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1618 scan_config->out_of_channel_time = cpu_to_le32(170);
1619 scan_config->suspend_time = cpu_to_le32(30);
1620 scan_config->dwell_active = 20;
1621 scan_config->dwell_passive = 110;
1622 scan_config->dwell_fragmented = 20;
1623
1624 memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1625
1626 scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
1627 scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
1628 IWL_CHANNEL_FLAG_ACCURATE_EBS |
1629 IWL_CHANNEL_FLAG_EBS_ADD |
1630 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
1631
1632 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
1633 for (i = 0; i < band->n_channels; i++, j++)
1634 scan_config->channel_array[j] = band->channels[i].center_freq;
1635 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
1636 for (i = 0; i < band->n_channels; i++, j++)
1637 scan_config->channel_array[j] = band->channels[i].center_freq;
1638
1639 cmd.data[0] = scan_config;
1640 cmd.len[0] = cmd_size;
1641 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1642
1643 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1644
1645 ret = iwl_mvm_send_cmd(mvm, &cmd);
1646
1647 kfree(scan_config);
1648 return ret;
1649}
1650
1651static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
1652{
1653 int i;
1654
1655 for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
1656 if (mvm->scan_uid[i] == uid)
1657 return i;
1658
1659 return i;
1660}
1661
/*
 * Find the first free slot in mvm->scan_uid (free slots hold uid 0).
 * Returns IWL_MVM_MAX_SIMULTANEOUS_SCANS when every slot is in use.
 */
static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
{
	return iwl_mvm_find_scan_uid(mvm, 0);
}
1666
1667static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
1668 enum iwl_umac_scan_uid_type type)
1669{
1670 int i;
1671
1672 for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
1673 if (mvm->scan_uid[i] & type)
1674 return true;
1675
1676 return false;
1677}
1678
1679static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
1680 enum iwl_umac_scan_uid_type type)
1681{
1682 u32 uid;
1683
1684 /* make sure exactly one bit is on in scan type */
1685 WARN_ON(hweight8(type) != 1);
1686
1687 /*
1688 * Make sure scan uids are unique. If one scan lasts long time while
1689 * others are completing frequently, the seq number will wrap up and
1690 * we may have more than one scan with the same uid.
1691 */
1692 do {
1693 uid = type | (mvm->scan_seq_num <<
1694 IWL_UMAC_SCAN_UID_SEQ_OFFSET);
1695 mvm->scan_seq_num++;
1696 } while (iwl_mvm_find_scan_uid(mvm, uid) <
1697 IWL_MVM_MAX_SIMULTANEOUS_SCANS);
1698
1699 IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
1700
1701 return uid;
1702}
1703
/*
 * Fill the fields of a UMAC scan request that are common to regular and
 * scheduled scans: dwell times, out-of-channel budget and priority.
 * The whole command buffer is zeroed first (ksize() covers the full
 * allocation, including the channel array and tail that follow *cmd).
 */
static void
iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
				    struct iwl_scan_req_umac *cmd,
				    struct iwl_mvm_scan_params *params)
{
	memset(cmd, 0, ksize(cmd));
	/* UMAC header size excludes the header itself */
	cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
				    sizeof(struct iwl_mvm_umac_cmd_hdr));
	/* dwell params are computed once; the 2.4 GHz entry is used for all */
	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
	if (params->passive_fragmented)
		cmd->fragmented_dwell =
			params->dwell[IEEE80211_BAND_2GHZ].passive;
	cmd->max_out_time = cpu_to_le32(params->max_out_time);
	cmd->suspend_time = cpu_to_le32(params->suspend_time);
	cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
}
1721
1722static void
1723iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1724 struct ieee80211_channel **channels,
1725 int n_channels, u32 ssid_bitmap,
1726 struct iwl_scan_req_umac *cmd)
1727{
1728 struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
1729 int i;
1730
1731 for (i = 0; i < n_channels; i++) {
1732 channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
1733 channel_cfg[i].channel_num = channels[i]->hw_value;
1734 channel_cfg[i].iter_count = 1;
1735 channel_cfg[i].iter_interval = 0;
1736 }
1737}
1738
1739static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
1740 struct cfg80211_ssid *ssids,
1741 int fragmented)
1742{
1743 int flags = 0;
1744
1745 if (n_ssids == 0)
1746 flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
1747
1748 if (n_ssids == 1 && ssids[0].ssid_len != 0)
1749 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1750
1751 if (fragmented)
1752 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1753
1754 if (iwl_mvm_rrm_scan_needed(mvm))
1755 flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
1756
1757 return flags;
1758}
1759
/*
 * Start a regular (one-shot) scan using the UMAC scan API.
 *
 * Builds the SCAN_REQ_UMAC command in mvm->scan_cmd: generic dwell
 * parameters, a freshly generated uid, per-channel configs, and the
 * tail section holding schedule, probe template and direct SSIDs.
 *
 * Returns 0 on success; -EBUSY if all scan uid slots are taken,
 * -ENOMEM if the command buffer is missing, -ENOBUFS if the request
 * exceeds firmware limits, or the error from sending the command.
 */
int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct ieee80211_scan_request *req)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_REQ_UMAC,
		.len = { iwl_mvm_scan_size(mvm), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
	/* the tail sits right after the fixed-size channel config array */
	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
		sizeof(struct iwl_scan_channel_cfg_umac) *
			mvm->fw->ucode_capa.n_scan_channels;
	struct iwl_mvm_scan_params params = {};
	u32 uid, flags;
	u32 ssid_bitmap = 0;
	int ret, i, uid_idx;

	lockdep_assert_held(&mvm->mutex);

	uid_idx = iwl_mvm_find_free_scan_uid(mvm);
	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
		return -EBUSY;

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	/* bound-check SSIDs, IEs (+24+2 for probe req header/SSID IE)
	 * and channels against what the firmware can accept */
	if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
		    req->ies.common_ie_len +
		    req->ies.len[NL80211_BAND_2GHZ] +
		    req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
		    SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
		    mvm->fw->ucode_capa.n_scan_channels))
		return -ENOBUFS;

	iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
				 &params);

	iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);

	/* allocate and record a uid so completion can be matched later */
	uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
	mvm->scan_uid[uid_idx] = uid;
	cmd->uid = cpu_to_le32(uid);

	cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);

	flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
					       req->req.ssids,
					       params.passive_fragmented);

	/* regular scans always report all results */
	flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;

	cmd->general_flags = cpu_to_le32(flags);
	cmd->n_channels = req->req.n_channels;

	/* probe each requested SSID on every channel */
	for (i = 0; i < req->req.n_ssids; i++)
		ssid_bitmap |= BIT(i);

	iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
				       req->req.n_channels, ssid_bitmap, cmd);

	/* one-shot: single iteration, no delay */
	sec_part->schedule[0].iter_count = 1;
	sec_part->delay = 0;

	iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
		req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->req.mac_addr : NULL,
		req->req.mac_addr_mask);

	iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
				req->req.n_ssids, 0);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm,
			       "Scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
	}
	return ret;
}
1847
/*
 * Start a scheduled (periodic) scan using the UMAC scan API.
 *
 * Mirrors iwl_mvm_scan_umac() but marks the request periodic and
 * preemptive, sets low out-of-channel priority, enables EBS when the
 * firmware supports it and the last attempt succeeded, and derives the
 * direct-SSID bitmap from the sched-scan match sets.
 *
 * Returns 0 on success; -EBUSY if all scan uid slots are taken,
 * -ENOMEM if the command buffer is missing, -ENOBUFS if the request
 * exceeds firmware limits, or the error from sending the command.
 */
int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct cfg80211_sched_scan_request *req,
			    struct ieee80211_scan_ies *ies)
{

	struct iwl_host_cmd hcmd = {
		.id = SCAN_REQ_UMAC,
		.len = { iwl_mvm_scan_size(mvm), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
	/* the tail sits right after the fixed-size channel config array */
	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
		sizeof(struct iwl_scan_channel_cfg_umac) *
			mvm->fw->ucode_capa.n_scan_channels;
	struct iwl_mvm_scan_params params = {};
	u32 uid, flags;
	u32 ssid_bitmap = 0;
	int ret, uid_idx;

	lockdep_assert_held(&mvm->mutex);

	uid_idx = iwl_mvm_find_free_scan_uid(mvm);
	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
		return -EBUSY;

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	/* bound-check SSIDs, IEs (+24+2 for probe req header/SSID IE)
	 * and channels against what the firmware can accept */
	if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
		    ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
		    ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
		    SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
		    mvm->fw->ucode_capa.n_scan_channels))
		return -ENOBUFS;

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
				 &params);

	iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);

	/* sched scans may be preempted by regular scan requests */
	cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);

	uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
	mvm->scan_uid[uid_idx] = uid;
	cmd->uid = cpu_to_le32(uid);

	cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);

	flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
					       params.passive_fragmented);

	flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;

	/* report everything, or only matches, per the sched-scan request */
	if (iwl_mvm_scan_pass_all(mvm, req))
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
	else
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;

	cmd->general_flags = cpu_to_le32(flags);

	/* only use EBS if supported and it didn't fail last time */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
	    mvm->last_ebs_successful)
		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;

	cmd->n_channels = req->n_channels;

	iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
				    false);

	/* This API uses bits 0-19 instead of 1-20. */
	ssid_bitmap = ssid_bitmap >> 1;

	iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
				       ssid_bitmap, cmd);

	/* repeat (effectively) forever at the requested interval */
	sec_part->schedule[0].interval =
		cpu_to_le16(req->interval / MSEC_PER_SEC);
	sec_part->schedule[0].iter_count = 0xff;

	sec_part->delay = 0;

	iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
		req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->mac_addr : NULL,
		req->mac_addr_mask);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm,
			       "Sched scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
	}
	return ret;
}
1952
/*
 * Handle a SCAN_COMPLETE_UMAC notification from the firmware.
 *
 * Matches the notification's uid against the active-scan table, frees
 * the slot, records the EBS outcome, and informs mac80211: a regular
 * scan is completed (aborted flag derived from the fw status), while a
 * sched scan is only reported stopped once no other sched scan remains.
 * Always returns 0 (notification handled).
 */
int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
					struct iwl_rx_cmd_buffer *rxb,
					struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
	u32 uid = __le32_to_cpu(notif->uid);
	bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
	int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);

	/*
	 * Scan uid may be set to zero in case of scan abort request from above.
	 */
	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
		return 0;

	IWL_DEBUG_SCAN(mvm,
		       "Scan completed, uid %u type %s, status %s, EBS status %s\n",
		       uid, sched ? "sched" : "regular",
		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
				"completed" : "aborted",
		       notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
				"success" : "failed");

	/* remember EBS outcome so the next sched scan can decide to use it */
	mvm->last_ebs_successful = !notif->ebs_status;
	/* release the uid slot */
	mvm->scan_uid[uid_idx] = 0;

	if (!sched) {
		ieee80211_scan_completed(mvm->hw,
					 notif->status ==
						IWL_SCAN_OFFLOAD_ABORTED);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
	} else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
		ieee80211_sched_scan_stopped(mvm->hw);
	} else {
		IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
	}

	return 0;
}
1993
/*
 * Notification-wait callback used while stopping UMAC scans.
 *
 * Called for each SCAN_COMPLETE_UMAC packet; clears the matching uid
 * slot (so the regular RX handler becomes a no-op for it) and returns
 * true — ending the wait — only once no scan of the requested type
 * remains active. @data is the struct iwl_umac_scan_done context.
 */
static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
				     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_umac_scan_done *scan_done = data;
	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
	u32 uid = __le32_to_cpu(notif->uid);
	int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);

	/* we registered only for SCAN_COMPLETE_UMAC; anything else is a bug */
	if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
		return false;

	/* unknown uid — keep waiting */
	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
		return false;

	/*
	 * Clear scan uid of scans that was aborted from above and completed
	 * in FW so the RX handler does nothing.
	 */
	scan_done->mvm->scan_uid[uid_idx] = 0;

	return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
}
2016
2017static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
2018{
2019 struct iwl_umac_scan_abort cmd = {
2020 .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
2021 sizeof(struct iwl_mvm_umac_cmd_hdr)),
2022 .uid = cpu_to_le32(uid),
2023 };
2024
2025 lockdep_assert_held(&mvm->mutex);
2026
2027 IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
2028
2029 return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
2030}
2031
/*
 * Stop all active UMAC scans of the given @type and wait (up to 1 s)
 * for the firmware to confirm completion of each.
 *
 * If the radio is killed, regular scans are completed locally instead
 * of sending abort commands. When @notify is set, mac80211 is informed
 * of the stop after the wait finishes. Returns 0 on success, -EIO if
 * no abort could be sent, or the error from the notification wait.
 */
static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
			      enum iwl_umac_scan_uid_type type, bool notify)
{
	struct iwl_notification_wait wait_scan_done;
	static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
	struct iwl_umac_scan_done scan_done = {
		.mvm = mvm,
		.type = type,
	};
	int i, ret = -EIO;

	/* register the waiter before sending aborts to avoid a race */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
				   scan_done_notif,
				   ARRAY_SIZE(scan_done_notif),
				   iwl_scan_umac_done_check, &scan_done);

	IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);

	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
		if (mvm->scan_uid[i] & type) {
			int err;

			/*
			 * In rfkill the firmware won't answer; complete the
			 * regular scan locally and stop trying.
			 */
			if (iwl_mvm_is_radio_killed(mvm) &&
			    (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
				ieee80211_scan_completed(mvm->hw, true);
				iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
				break;
			}

			err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
			if (!err)
				ret = 0;
		}
	}

	/* no abort went out — drop the waiter and bail */
	if (ret) {
		IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
		return ret;
	}

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
	if (ret)
		return ret;

	if (notify) {
		if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
			ieee80211_sched_scan_stopped(mvm->hw);
		if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
			ieee80211_scan_completed(mvm->hw, true);
			iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
		}
	}

	return ret;
}
2088
2089int iwl_mvm_scan_size(struct iwl_mvm *mvm)
2090{
2091 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
2092 return sizeof(struct iwl_scan_req_umac) +
2093 sizeof(struct iwl_scan_channel_cfg_umac) *
2094 mvm->fw->ucode_capa.n_scan_channels +
2095 sizeof(struct iwl_scan_req_umac_tail);
2096
2097 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
2098 return sizeof(struct iwl_scan_req_unified_lmac) +
2099 sizeof(struct iwl_scan_channel_cfg_lmac) *
2100 mvm->fw->ucode_capa.n_scan_channels +
2101 sizeof(struct iwl_scan_probe_req);
2102
2103 return sizeof(struct iwl_scan_cmd) +
2104 mvm->fw->ucode_capa.max_probe_length +
2105 mvm->fw->ucode_capa.n_scan_channels *
2106 sizeof(struct iwl_scan_channel);
2107}