/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>

#include "mvm.h"
#include "iwl-eeprom-parse.h"
#include "fw-api-scan.h"

#define IWL_PLCP_QUIET_THRESH		1
#define IWL_ACTIVE_QUIET_TIME		10

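/*
 * Per-request scan parameters, filled by iwl_mvm_scan_calc_params():
 * dwell times for each band, and the max_out_time/suspend_time limits
 * that are applied when a vif is bound to a PHY context.
 */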
struct iwl_mvm_scan_params {
	u32 max_out_time;
	u32 suspend_time;
	bool passive_fragmented;
	struct _dwell {
		u16 passive;
		u16 active;
	} dwell[IEEE80211_NUM_BANDS];
};

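/*
 * Build the RX chain selection for scanning: use the scan_rx_ant
 * override when one is set, otherwise all RX antennas the firmware
 * declares valid, and force the driver's selection on the firmware.
 */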
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
	u16 rx_chain;
	u8 rx_ant;

	if (mvm->scan_rx_ant != ANT_NONE)
		rx_ant = mvm->scan_rx_ant;
	else
		rx_ant = mvm->fw->valid_rx_ant;
	rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
	return cpu_to_le16(rx_chain);
}

static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ)
		return cpu_to_le32(PHY_BAND_24);
	else
		return cpu_to_le32(PHY_BAND_5);
}

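/*
 * Pick the rate and TX antenna for probe requests: rotate through the
 * valid TX antennas on each call, and use 1 Mbps CCK on 2.4 GHz (unless
 * CCK frames were disallowed) or 6 Mbps OFDM otherwise.
 */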
static inline __le32
iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
			  bool no_cck)
{
	u32 tx_ant;

	mvm->scan_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
				     mvm->scan_last_antenna_idx);
	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;

	if (band == IEEE80211_BAND_2GHZ && !no_cck)
		return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
				   tx_ant);
	else
		return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}

/*
 * We insert the SSIDs in an inverted order, because the FW will
 * invert it back. The most prioritized SSID, which is first in the
 * request list, is not copied here, but inserted directly to the probe
 * request.
 */
static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
				    struct cfg80211_ssid *ssids,
				    int n_ssids, int first)
{
	int fw_idx, req_idx;

	for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
	     req_idx--, fw_idx++) {
		cmd_ssid[fw_idx].id = WLAN_EID_SSID;
		cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
		memcpy(cmd_ssid[fw_idx].ssid,
		       ssids[req_idx].ssid,
		       ssids[req_idx].ssid_len);
	}
}

/*
 * If req->n_ssids > 0, it means we should do an active scan.
 * In case of active scan w/o directed scan, we receive a zero-length SSID
 * just to notify that this scan is active and not passive.
 * In order to notify the FW of the number of SSIDs we wish to scan (including
 * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first). The first SSID is
 * already included in the probe template, so we need to set only
 * req->n_ssids - 1 bits in addition to the first bit.
 */
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
{
	if (band == IEEE80211_BAND_2GHZ)
		return 30 + 3 * (n_ssids + 1);
	return 20 + 2 * (n_ssids + 1);
}

static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
{
	return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}

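/*
 * Fill the per-channel part of the scan command: channel number, the
 * SSID/active bits described above (the active bit is cleared for
 * NO_IR channels) and the per-band dwell times computed in
 * iwl_mvm_scan_calc_params().
 */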
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
				       struct cfg80211_scan_request *req,
				       bool basic_ssid,
				       struct iwl_mvm_scan_params *params)
{
	struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
		(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
	int i;
	int type = BIT(req->n_ssids) - 1;
	enum ieee80211_band band = req->channels[0]->band;

	if (!basic_ssid)
		type |= BIT(req->n_ssids);

	for (i = 0; i < cmd->channel_count; i++) {
		chan->channel = cpu_to_le16(req->channels[i]->hw_value);
		chan->type = cpu_to_le32(type);
		if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
			chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = cpu_to_le16(params->dwell[band].active);
		chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
		chan->iteration_count = cpu_to_le16(1);
		chan++;
	}
}

/*
 * Fill in probe request with the following parameters:
 * TA is our vif HW address, which mac80211 ensures we have.
 * Packet is broadcasted, so this is both SA and DA.
 * The probe request IE is made out of two: first comes the most prioritized
 * SSID if a directed scan is requested. Second comes whatever extra
 * information was given to us as the scan request IE.
 */
static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
				  int n_ssids, const u8 *ssid, int ssid_len,
				  const u8 *band_ie, int band_ie_len,
				  const u8 *common_ie, int common_ie_len,
				  int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (u16)len;

	/* points to the payload of the request */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our SSID IE */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	if (WARN_ON(left < band_ie_len + common_ie_len))
		return len;

	if (band_ie && band_ie_len) {
		memcpy(pos, band_ie, band_ie_len);
		pos += band_ie_len;
		len += band_ie_len;
	}

	if (common_ie && common_ie_len) {
		memcpy(pos, common_ie, common_ie_len);
		pos += common_ie_len;
		len += common_ie_len;
	}

	return (u16)len;
}

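/* Interface iterator: report whether any vif is bound to a PHY context. */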
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool *global_bound = data;

	if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
		*global_bound = true;
}

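/*
 * Compute dwell and out-of-channel parameters for this scan. When no
 * vif is bound to a PHY context only the dwell times are filled; when
 * one is, max_out_time/suspend_time are limited, and tightened further
 * for low-latency traffic or a low-priority scan request.
 */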
static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     int n_ssids, u32 flags,
				     struct iwl_mvm_scan_params *params)
{
	bool global_bound = false;
	enum ieee80211_band band;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_scan_condition_iterator,
						   &global_bound);

	if (!global_bound)
		goto not_bound;

	params->suspend_time = 100;
	params->max_out_time = 600;

	if (iwl_mvm_low_latency(mvm)) {
		params->suspend_time = 250;
		params->max_out_time = 250;
	}

	if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
		params->max_out_time = 200;

not_bound:

	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
								      n_ssids);
	}
}

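/*
 * Build and send the legacy SCAN_REQUEST_CMD for a mac80211 scan
 * request: scan parameters, direct SSIDs, the probe request template
 * and the channel list, then wait for the firmware status response.
 */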
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
			 struct ieee80211_vif *vif,
			 struct cfg80211_scan_request *req)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_cmd *cmd = mvm->scan_cmd;
	int ret;
	u32 status;
	int ssid_len = 0;
	u8 *ssid = NULL;
	bool basic_ssid = !(mvm->fw->ucode_capa.flags &
			    IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
	struct iwl_mvm_scan_params params = {};

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
	mvm->scan_status = IWL_MVM_SCAN_OS;
	memset(cmd, 0, ksize(cmd));

	cmd->channel_count = (u8)req->n_channels;
	cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, &params);
	cmd->max_out_time = cpu_to_le32(params.max_out_time);
	cmd->suspend_time = cpu_to_le32(params.suspend_time);
	if (params.passive_fragmented)
		cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;

	cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
		cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
	else
		cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);

	cmd->repeats = cpu_to_le32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (req->n_ssids > 0) {
		cmd->passive2active = cpu_to_le16(1);
		cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids,
				basic_ssid ? 1 : 0);

	cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
					   TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
	cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags =
			iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
						  req->no_cck);

	cmd->tx_cmd.len =
		cpu_to_le16(iwl_mvm_fill_probe_req(
			    (struct ieee80211_mgmt *)cmd->data,
			    vif->addr,
			    req->n_ssids, ssid, ssid_len,
			    req->ie, req->ie_len, NULL, 0,
			    mvm->fw->ucode_capa.max_probe_length));

	iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);

	cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
		le16_to_cpu(cmd->tx_cmd.len) +
		(cmd->channel_count * sizeof(struct iwl_scan_channel)));
	hcmd.len[0] = le16_to_cpu(cmd->len);

	status = SCAN_RESPONSE_OK;
	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
	if (!ret && status == SCAN_RESPONSE_OK) {
		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
			status, ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ret = -EIO;
	}
	return ret;
}

int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_cmd_response *resp = (void *)pkt->data;

	IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
		       le32_to_cpu(resp->status));
	return 0;
}

int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scan_complete_notif *notif = (void *)pkt->data;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
		       notif->status, notif->scanned_channels);

	if (mvm->scan_status == IWL_MVM_SCAN_OS)
		mvm->scan_status = IWL_MVM_SCAN_NONE;
	ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);

	iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);

	return 0;
}

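/*
 * Scheduled scan results notification. With the unified LMAC scan API
 * there is no client bitmap to check; otherwise results are only
 * forwarded when they are destined for the sched scan client.
 */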
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
				    struct iwl_rx_cmd_buffer *rxb,
				    struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u8 client_bitmap = 0;

	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
		struct iwl_sched_scan_results *notif = (void *)pkt->data;

		client_bitmap = notif->client_bitmap;
	}

	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
	    client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
		if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
			IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
			ieee80211_sched_scan_results(mvm->hw);
		} else {
			IWL_DEBUG_SCAN(mvm, "Scan results\n");
		}
	}

	return 0;
}

static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
				     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_scan_complete_notif *notif;
	u32 *resp;

	switch (pkt->hdr.cmd) {
	case SCAN_ABORT_CMD:
		resp = (void *)pkt->data;
		if (*resp == CAN_ABORT_STATUS) {
			IWL_DEBUG_SCAN(mvm,
				       "Scan can be aborted, wait until completion\n");
			return false;
		}

		/*
		 * If scan cannot be aborted, it means that we had a
		 * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
		 * ieee80211_scan_completed already.
		 */
		IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
			       *resp);
		return true;

	case SCAN_COMPLETE_NOTIFICATION:
		notif = (void *)pkt->data;
		IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
			       notif->status);
		return true;

	default:
		WARN_ON(1);
		return false;
	};
}

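/*
 * Abort a regular (non-offloaded) scan: send SCAN_ABORT_CMD and wait,
 * via iwl_mvm_scan_abort_notif() above, for either the abort response
 * or the scan complete notification.
 */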
static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait wait_scan_abort;
	static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
					       SCAN_COMPLETE_NOTIFICATION };
	int ret;

	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
		return 0;

	if (iwl_mvm_is_radio_killed(mvm)) {
		ieee80211_scan_completed(mvm->hw, true);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		return 0;
	}

	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
				   scan_abort_notif,
				   ARRAY_SIZE(scan_abort_notif),
				   iwl_mvm_scan_abort_notif, NULL);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
		/* mac80211's state will be cleaned in the nic_restart flow */
		goto out_remove_notif;
	}

	return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);

out_remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
	return ret;
}

int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
					   struct iwl_rx_cmd_buffer *rxb,
					   struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u8 status, ebs_status;

	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
		struct iwl_periodic_scan_complete *scan_notif;

		scan_notif = (void *)pkt->data;
		status = scan_notif->status;
		ebs_status = scan_notif->ebs_status;
	} else {
		struct iwl_scan_offload_complete *scan_notif;

		scan_notif = (void *)pkt->data;
		status = scan_notif->status;
		ebs_status = scan_notif->ebs_status;
	}
	/* scan status must be locked for proper checking */
	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_SCAN(mvm,
		       "%s completed, status %s, EBS status %s\n",
		       mvm->scan_status == IWL_MVM_SCAN_SCHED ?
		       "Scheduled scan" : "Scan",
		       status == IWL_SCAN_OFFLOAD_COMPLETED ?
		       "completed" : "aborted",
		       ebs_status == IWL_SCAN_EBS_SUCCESS ?
		       "success" : "failed");

	/* only call mac80211 completion if the stop was initiated by FW */
	if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ieee80211_sched_scan_stopped(mvm->hw);
	} else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ieee80211_scan_completed(mvm->hw,
					 status == IWL_SCAN_OFFLOAD_ABORTED);
	}

	mvm->last_ebs_successful = !ebs_status;

	return 0;
}

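/*
 * Build the TX command that carries the probe request template for one
 * band of a scheduled (offloaded) scan.
 */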
static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct ieee80211_scan_ies *ies,
					  enum ieee80211_band band,
					  struct iwl_tx_cmd *cmd,
					  u8 *data)
{
	u16 cmd_len;

	cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
	cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	cmd->sta_id = mvm->aux_sta.sta_id;

	cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);

	cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
					 vif->addr,
					 1, NULL, 0,
					 ies->ies[band], ies->len[band],
					 ies->common_ies, ies->common_ie_len,
					 SCAN_OFFLOAD_PROBE_REQ_SIZE);
	cmd->len = cpu_to_le16(cmd_len);
}

static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       struct cfg80211_sched_scan_request *req,
			       struct iwl_scan_offload_cmd *scan,
			       struct iwl_mvm_scan_params *params)
{
	scan->channel_count = req->n_channels;
	scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
	scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
	scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
	scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);

	scan->max_out_time = cpu_to_le32(params->max_out_time);
	scan->suspend_time = cpu_to_le32(params->suspend_time);

	scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					  MAC_FILTER_IN_BEACON);
	scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
	scan->rep_count = cpu_to_le32(1);

	if (params->passive_fragmented)
		scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
}

static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
{
	int i;

	for (i = 0; i < PROBE_OPTION_MAX; i++) {
		if (!ssid_list[i].len)
			break;
		if (ssid_list[i].len == ssid_len &&
		    !memcmp(ssid_list[i].ssid, ssid, ssid_len))
			return i;
	}
	return -1;
}

static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
					struct iwl_ssid_ie *direct_scan,
					u32 *ssid_bitmap, bool basic_ssid)
{
	int i, j;
	int index;

	/*
	 * copy SSIDs from match list.
	 * iwl_config_sched_scan_profiles() uses the order of these ssids to
	 * config match list.
	 */
	for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
		/* skip empty SSID matchsets */
		if (!req->match_sets[i].ssid.ssid_len)
			continue;
		direct_scan[i].id = WLAN_EID_SSID;
		direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
		memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
		       direct_scan[i].len);
	}

	/* add SSIDs from scan SSID list */
	*ssid_bitmap = 0;
	for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
		index = iwl_ssid_exist(req->ssids[j].ssid,
				       req->ssids[j].ssid_len,
				       direct_scan);
		if (index < 0) {
			if (!req->ssids[j].ssid_len && basic_ssid)
				continue;
			direct_scan[i].id = WLAN_EID_SSID;
			direct_scan[i].len = req->ssids[j].ssid_len;
			memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
			       direct_scan[i].len);
			*ssid_bitmap |= BIT(i + 1);
			i++;
		} else {
			*ssid_bitmap |= BIT(index + 1);
		}
	}
}

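/*
 * Fill the channel configuration of the scheduled scan. channels_buffer
 * is laid out as parallel arrays, each with room for n_scan_channels
 * entries: type, channel_number, iter_count, iter_interval,
 * active_dwell and passive_dwell; *head indexes the next free entry
 * across the per-band calls.
 */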
static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
				  struct cfg80211_sched_scan_request *req,
				  u8 *channels_buffer,
				  enum ieee80211_band band,
				  int *head,
				  u32 ssid_bitmap,
				  struct iwl_mvm_scan_params *params)
{
	u32 n_channels = mvm->fw->ucode_capa.n_scan_channels;
	__le32 *type = (__le32 *)channels_buffer;
	__le16 *channel_number = (__le16 *)(type + n_channels);
	__le16 *iter_count = channel_number + n_channels;
	__le32 *iter_interval = (__le32 *)(iter_count + n_channels);
	u8 *active_dwell = (u8 *)(iter_interval + n_channels);
	u8 *passive_dwell = active_dwell + n_channels;
	int i, index = 0;

	for (i = 0; i < req->n_channels; i++) {
		struct ieee80211_channel *chan = req->channels[i];

		if (chan->band != band)
			continue;

		index = *head;
		(*head)++;

		channel_number[index] = cpu_to_le16(chan->hw_value);
		active_dwell[index] = params->dwell[band].active;
		passive_dwell[index] = params->dwell[band].passive;

		iter_count[index] = cpu_to_le16(1);
		iter_interval[index] = 0;

		if (!(chan->flags & IEEE80211_CHAN_NO_IR))
			type[index] |=
				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);

		type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
					   IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);

		if (chan->flags & IEEE80211_CHAN_NO_HT40)
			type[index] |=
				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);

		/* scan for all SSIDs from req->ssids */
		type[index] |= cpu_to_le32(ssid_bitmap);
	}
}

int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct cfg80211_sched_scan_request *req,
			      struct ieee80211_scan_ies *ies)
{
	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
	int head = 0;
	u32 ssid_bitmap;
	int cmd_len;
	int ret;
	u8 *probes;
	bool basic_ssid = !(mvm->fw->ucode_capa.flags &
			    IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	struct iwl_scan_offload_cfg *scan_cfg;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_CONFIG_CMD,
	};
	struct iwl_mvm_scan_params params = {};

	lockdep_assert_held(&mvm->mutex);

	cmd_len = sizeof(struct iwl_scan_offload_cfg) +
		  mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE +
		  2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;

	scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
	if (!scan_cfg)
		return -ENOMEM;

	probes = scan_cfg->data +
		mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE;

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
	iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
	scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);

	iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan,
				    &ssid_bitmap, basic_ssid);
	/* build tx frames for supported bands */
	if (band_2ghz) {
		iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
					      IEEE80211_BAND_2GHZ,
					      &scan_cfg->scan_cmd.tx_cmd[0],
					      probes);
		iwl_build_channel_cfg(mvm, req, scan_cfg->data,
				      IEEE80211_BAND_2GHZ, &head,
				      ssid_bitmap, &params);
	}
	if (band_5ghz) {
		iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
					      IEEE80211_BAND_5GHZ,
					      &scan_cfg->scan_cmd.tx_cmd[1],
					      probes +
					      SCAN_OFFLOAD_PROBE_REQ_SIZE);
		iwl_build_channel_cfg(mvm, req, scan_cfg->data,
				      IEEE80211_BAND_5GHZ, &head,
				      ssid_bitmap, &params);
	}

	cmd.data[0] = scan_cfg;
	cmd.len[0] = cmd_len;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;

	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(scan_cfg);
	return ret;
}

int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
				       struct cfg80211_sched_scan_request *req)
{
	struct iwl_scan_offload_profile *profile;
	struct iwl_scan_offload_profile_cfg *profile_cfg;
	struct iwl_scan_offload_blacklist *blacklist;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
		.len[1] = sizeof(*profile_cfg),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
	};
	int blacklist_len;
	int i;
	int ret;

	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
		return -EIO;

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
	else
		blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;

	blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
	if (!blacklist)
		return -ENOMEM;

	profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
	if (!profile_cfg) {
		ret = -ENOMEM;
		goto free_blacklist;
	}

	cmd.data[0] = blacklist;
	cmd.len[0] = sizeof(*blacklist) * blacklist_len;
	cmd.data[1] = profile_cfg;

	/* No blacklist configuration */

	profile_cfg->num_profiles = req->n_match_sets;
	profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
	profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
	profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
		profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;

	for (i = 0; i < req->n_match_sets; i++) {
		profile = &profile_cfg->profiles[i];
		profile->ssid_index = i;
		/* Support any cipher and auth algorithm */
		profile->unicast_cipher = 0xff;
		profile->auth_alg = 0xff;
		profile->network_type = IWL_NETWORK_TYPE_ANY;
		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
	}

	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(profile_cfg);
free_blacklist:
	kfree(blacklist);

	return ret;
}

int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
			     struct cfg80211_sched_scan_request *req)
{
	struct iwl_scan_offload_req scan_req = {
		.watchdog = IWL_SCHED_SCAN_WATCHDOG,

		.schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
		.schedule_line[0].delay = cpu_to_le16(req->interval / 1000),
		.schedule_line[0].full_scan_mul = 1,

		.schedule_line[1].iterations = 0xff,
		.schedule_line[1].delay = cpu_to_le16(req->interval / 1000),
		.schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
	};

	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
		IWL_DEBUG_SCAN(mvm,
			       "Sending scheduled scan with filtering, filter len %d\n",
			       req->n_match_sets);
	} else {
		IWL_DEBUG_SCAN(mvm,
			       "Sending Scheduled scan without filtering\n");
		scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
	}

	if (mvm->last_ebs_successful &&
	    mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
		scan_req.flags |=
			cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);

	return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
				    sizeof(scan_req), &scan_req);
}

static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_ABORT_CMD,
	};
	u32 status;

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * scheduled scan currently */
	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
	     mvm->scan_status != IWL_MVM_SCAN_OS))
		return -EIO;

	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
	if (ret)
		return ret;

	if (status != CAN_ABORT_STATUS) {
		/*
		 * The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is completed.
		 */
		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
		ret = -ENOENT;
	}

	return ret;
}

int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
{
	int ret;
	struct iwl_notification_wait wait_scan_done;
	static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
	bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
	     mvm->scan_status != IWL_MVM_SCAN_OS)) {
		IWL_DEBUG_SCAN(mvm, "No scan to stop\n");
		return 0;
	}

	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
				   scan_done_notif,
				   ARRAY_SIZE(scan_done_notif),
				   NULL, NULL);

	ret = iwl_mvm_send_scan_offload_abort(mvm);
	if (ret) {
		IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
			       sched ? "offloaded " : "", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
		return ret;
	}

	IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
		       sched ? "offloaded " : "");

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
	if (ret)
		return ret;

	/*
	 * Clear the scan status so the next scan requests will succeed. This
	 * also ensures the Rx handler doesn't do anything, as the scan was
	 * stopped from above.
	 */
	mvm->scan_status = IWL_MVM_SCAN_NONE;

	if (notify) {
		if (sched)
			ieee80211_sched_scan_stopped(mvm->hw);
		else
			ieee80211_scan_completed(mvm->hw, true);
	}

	return 0;
}

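/*
 * The helpers and entry points below implement the unified LMAC scan
 * API (IWL_UCODE_TLV_API_LMAC_SCAN), which uses a single request layout
 * for both regular and scheduled scans.
 */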
static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
					     struct iwl_scan_req_tx_cmd *tx_cmd,
					     bool no_cck)
{
	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
					 TX_CMD_FLG_BT_DIS);
	tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
							   IEEE80211_BAND_2GHZ,
							   no_cck);
	tx_cmd[0].sta_id = mvm->aux_sta.sta_id;

	tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
					 TX_CMD_FLG_BT_DIS);
	tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
							   IEEE80211_BAND_5GHZ,
							   no_cck);
	tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
}

static void
iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
			       struct ieee80211_channel **channels,
			       int n_channels, u32 ssid_bitmap,
			       struct iwl_scan_req_unified_lmac *cmd)
{
	struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
	int i;

	for (i = 0; i < n_channels; i++) {
		channel_cfg[i].channel_num =
			cpu_to_le16(channels[i]->hw_value);
		channel_cfg[i].iter_count = cpu_to_le16(1);
		channel_cfg[i].iter_interval = 0;
		channel_cfg[i].flags =
			cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
				    ssid_bitmap);
	}
}

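/*
 * Build the probe request template for a unified scan: a broadcast
 * probe request with an empty SSID IE, followed by the per-band IEs and
 * the common IEs, with offsets and lengths recorded so the firmware can
 * assemble the per-band frames.
 */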
static void
iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				 struct ieee80211_scan_ies *ies,
				 struct iwl_scan_req_unified_lmac *cmd)
{
	struct iwl_scan_probe_req *preq = (void *)(cmd->data +
		sizeof(struct iwl_scan_channel_cfg_lmac) *
			mvm->fw->ucode_capa.n_scan_channels);
	struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
	u8 *pos;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, vif->addr, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	pos = frame->u.probe_req.variable;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	preq->mac_header.offset = 0;
	preq->mac_header.len = cpu_to_le16(24 + 2);

	memcpy(pos, ies->ies[IEEE80211_BAND_2GHZ],
	       ies->len[IEEE80211_BAND_2GHZ]);
	preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
	preq->band_data[0].len = cpu_to_le16(ies->len[IEEE80211_BAND_2GHZ]);
	pos += ies->len[IEEE80211_BAND_2GHZ];

	memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
	       ies->len[IEEE80211_BAND_5GHZ]);
	preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
	preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
	pos += ies->len[IEEE80211_BAND_5GHZ];

	memcpy(pos, ies->common_ies, ies->common_ie_len);
	preq->common_data.offset = cpu_to_le16(pos - preq->buf);
	preq->common_data.len = cpu_to_le16(ies->common_ie_len);
}

static void
iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
				       struct iwl_scan_req_unified_lmac *cmd,
				       struct iwl_mvm_scan_params *params)
{
	memset(cmd, 0, ksize(cmd));
	cmd->active_dwell = (u8)params->dwell[IEEE80211_BAND_2GHZ].active;
	cmd->passive_dwell = (u8)params->dwell[IEEE80211_BAND_2GHZ].passive;
	/* TODO: Use params; now fragmented isn't used. */
	cmd->fragmented_dwell = 0;
	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
	cmd->max_out_time = cpu_to_le32(params->max_out_time);
	cmd->suspend_time = cpu_to_le32(params->suspend_time);
	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
	cmd->iter_num = cpu_to_le32(1);

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
	    mvm->last_ebs_successful) {
		cmd->channel_opt[0].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[1].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
	}
}

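/*
 * Build and send a unified LMAC scan request for a one-shot scan:
 * schedule line 0 runs a single iteration and schedule line 1 is left
 * empty.
 */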
int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct ieee80211_scan_request *req)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_OFFLOAD_REQUEST_CMD,
		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
			 sizeof(struct iwl_scan_channel_cfg_lmac) *
			 mvm->fw->ucode_capa.n_scan_channels +
			 sizeof(struct iwl_scan_probe_req), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
	struct iwl_mvm_scan_params params = {};
	u32 flags;
	int ssid_bitmap = 0;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	if (WARN_ON_ONCE(req->req.n_ssids > PROBE_OPTION_MAX ||
			 req->ies.common_ie_len + req->ies.len[0] +
			 req->ies.len[1] + 24 + 2 >
			 SCAN_OFFLOAD_PROBE_REQ_SIZE ||
			 req->req.n_channels >
			 mvm->fw->ucode_capa.n_scan_channels))
		return -ENOBUFS;

	mvm->scan_status = IWL_MVM_SCAN_OS;

	iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
				 &params);

	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);

	cmd->n_channels = (u8)req->req.n_channels;

	flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;

	if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;

	if (params.passive_fragmented)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;

	if (req->req.n_ssids == 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;

	cmd->scan_flags = cpu_to_le32(flags);

	cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
	iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
				req->req.n_ssids, 0);

	cmd->schedule[0].delay = 0;
	cmd->schedule[0].iterations = 1;
	cmd->schedule[0].full_scan_mul = 0;
	cmd->schedule[1].delay = 0;
	cmd->schedule[1].iterations = 0;
	cmd->schedule[1].full_scan_mul = 0;

	for (i = 1; i <= req->req.n_ssids; i++)
		ssid_bitmap |= BIT(i);

	iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
				       req->req.n_channels, ssid_bitmap,
				       cmd);

	iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, cmd);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ret = -EIO;
	}
	return ret;
}

int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    struct cfg80211_sched_scan_request *req,
				    struct ieee80211_scan_ies *ies)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_OFFLOAD_REQUEST_CMD,
		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
			 sizeof(struct iwl_scan_channel_cfg_lmac) *
			 mvm->fw->ucode_capa.n_scan_channels +
			 sizeof(struct iwl_scan_probe_req), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
	struct iwl_mvm_scan_params params = {};
	int ret;
	u32 flags = 0, ssid_bitmap = 0;

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	if (WARN_ON_ONCE(req->n_ssids > PROBE_OPTION_MAX ||
			 ies->common_ie_len + ies->len[0] + ies->len[1] + 24 + 2
			 > SCAN_OFFLOAD_PROBE_REQ_SIZE ||
			 req->n_channels > mvm->fw->ucode_capa.n_scan_channels))
		return -ENOBUFS;

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);

	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);

	cmd->n_channels = (u8)req->n_channels;

	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
		IWL_DEBUG_SCAN(mvm,
			       "Sending scheduled scan with filtering, n_match_sets %d\n",
			       req->n_match_sets);
	} else {
		IWL_DEBUG_SCAN(mvm,
			       "Sending Scheduled scan without filtering\n");
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
	}

	if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;

	if (params.passive_fragmented)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;

	if (req->n_ssids == 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;

	cmd->scan_flags = cpu_to_le32(flags);

	cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
	iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);

	cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
	cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
	cmd->schedule[0].full_scan_mul = 1;

	cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
	cmd->schedule[1].iterations = 0xff;
	cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;

	iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
				       ssid_bitmap, cmd);

	iwl_mvm_build_unified_scan_probe(mvm, vif, ies, cmd);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm,
			       "Sched scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ret = -EIO;
	}
	return ret;
}

int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
		return iwl_mvm_scan_offload_stop(mvm, true);
	return iwl_mvm_cancel_regular_scan(mvm);
}