/*
 * This file is part of wlcore
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2011-2013 Texas Instruments Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/wl12xx.h>
#include <linux/interrupt.h>

#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "tx.h"
#include "ps.h"
#include "init.h"
#include "debugfs.h"
#include "testmode.h"
#include "scan.h"
#include "hw_ops.h"
#include "sysfs.h"

#define WL1271_BOOT_RETRIES 3

static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;

static void __wl1271_op_remove_interface(struct wl1271 *wl,
                                         struct ieee80211_vif *vif,
                                         bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);

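/*
 * Tell the firmware that the STA role is now authorized: once the vif is
 * associated, send a peer-state command for the station's hlid, but only
 * the first time (guarded by WLVIF_FLAG_STA_STATE_SENT).
 */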
static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        int ret;

        if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
                return -EINVAL;

        if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                return 0;

        if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
                return 0;

        ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
        if (ret < 0)
                return ret;

        wl1271_info("Association completed.");
        return 0;
}

static void wl1271_reg_notify(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_supported_band *band;
        struct ieee80211_channel *ch;
        int i;
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct wl1271 *wl = hw->priv;

        band = wiphy->bands[IEEE80211_BAND_5GHZ];
        for (i = 0; i < band->n_channels; i++) {
                ch = &band->channels[i];
                if (ch->flags & IEEE80211_CHAN_DISABLED)
                        continue;

                if (ch->flags & IEEE80211_CHAN_RADAR)
                        ch->flags |= IEEE80211_CHAN_NO_IR;
        }

        wlcore_regdomain_config(wl);
}

static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                   bool enable)
{
        int ret = 0;

        /* we should hold wl->mutex */
        ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
        if (ret < 0)
                goto out;

        if (enable)
                set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
        else
                clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
        return ret;
}

/*
 * this function is called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        int ret = 0;
        int period = wl->conf.rx_streaming.interval;

        /* don't reconfigure if rx_streaming is disabled */
        if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
                goto out;

        /* reconfigure/disable according to new streaming_period */
        if (period &&
            test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
            (wl->conf.rx_streaming.always ||
             test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
                ret = wl1271_set_rx_streaming(wl, wlvif, true);
        else {
                ret = wl1271_set_rx_streaming(wl, wlvif, false);
                /* don't cancel_work_sync since we might deadlock */
                del_timer_sync(&wlvif->rx_streaming_timer);
        }
out:
        return ret;
}

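/*
 * Work item that turns RX streaming on for an associated vif (when the
 * always/soft-gemini conditions checked below hold) and arms
 * rx_streaming_timer so it is turned back off after the configured
 * duration of inactivity.
 */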
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
        int ret;
        struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
                                                rx_streaming_enable_work);
        struct wl1271 *wl = wlvif->wl;

        mutex_lock(&wl->mutex);

        if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
            !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
            (!wl->conf.rx_streaming.always &&
             !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
                goto out;

        if (!wl->conf.rx_streaming.interval)
                goto out;

        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;

        ret = wl1271_set_rx_streaming(wl, wlvif, true);
        if (ret < 0)
                goto out_sleep;

        /* stop it after some time of inactivity */
        mod_timer(&wlvif->rx_streaming_timer,
                  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
        wl1271_ps_elp_sleep(wl);
out:
        mutex_unlock(&wl->mutex);
}

static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
        int ret;
        struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
                                                rx_streaming_disable_work);
        struct wl1271 *wl = wlvif->wl;

        mutex_lock(&wl->mutex);

        if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
                goto out;

        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;

        ret = wl1271_set_rx_streaming(wl, wlvif, false);
        if (ret)
                goto out_sleep;

out_sleep:
        wl1271_ps_elp_sleep(wl);
out:
        mutex_unlock(&wl->mutex);
}

static void wl1271_rx_streaming_timer(unsigned long data)
{
        struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
        struct wl1271 *wl = wlvif->wl;
        ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}

/* wl->mutex must be taken */
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
{
        /* if the watchdog is not armed, don't do anything */
        if (wl->tx_allocated_blocks == 0)
                return;

        cancel_delayed_work(&wl->tx_watchdog_work);
        ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
                msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
}

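/*
 * Tx watchdog: fires when the firmware has not freed any Tx blocks for
 * tx_watchdog_timeout ms. A ROC in progress, an active scan or an AP
 * with connected stations only re-arms the timer; otherwise Tx is
 * considered stuck and a recovery is queued.
 */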
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct wl1271 *wl;

        dwork = container_of(work, struct delayed_work, work);
        wl = container_of(dwork, struct wl1271, tx_watchdog_work);

        mutex_lock(&wl->mutex);

        if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;

        /* Tx went out in the meantime - everything is ok */
        if (unlikely(wl->tx_allocated_blocks == 0))
                goto out;

        /*
         * if a ROC is in progress, we might not have any Tx for a long
         * time (e.g. pending Tx on the non-ROC channels)
         */
        if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
                wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
                             wl->conf.tx.tx_watchdog_timeout);
                wl12xx_rearm_tx_watchdog_locked(wl);
                goto out;
        }

        /*
         * if a scan is in progress, we might not have any Tx for a long
         * time
         */
        if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
                wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
                             wl->conf.tx.tx_watchdog_timeout);
                wl12xx_rearm_tx_watchdog_locked(wl);
                goto out;
        }

        /*
         * AP might cache a frame for a long time for a sleeping station,
         * so rearm the timer if there's an AP interface with stations. If
         * Tx is genuinely stuck we will hopefully discover it when all
         * stations are removed due to inactivity.
         */
        if (wl->active_sta_count) {
                wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
                             " %d stations",
                             wl->conf.tx.tx_watchdog_timeout,
                             wl->active_sta_count);
                wl12xx_rearm_tx_watchdog_locked(wl);
                goto out;
        }

        wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
                     wl->conf.tx.tx_watchdog_timeout);
        wl12xx_queue_recovery_work(wl);

out:
        mutex_unlock(&wl->mutex);
}

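/*
 * Apply the optional module parameters (fwlog mode/size, bug_on_recovery,
 * no_recovery) on top of the default configuration. A value of -1 (or a
 * NULL fwlog_param string) means the parameter was not set.
 */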
static void wlcore_adjust_conf(struct wl1271 *wl)
{
        /* Adjust settings according to optional module parameters */

        /* Firmware Logger params */
        if (fwlog_mem_blocks != -1) {
                if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
                    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
                        wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
                } else {
                        wl1271_error(
                                "Illegal fwlog_mem_blocks=%d using default %d",
                                fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
                }
        }

        if (fwlog_param) {
                if (!strcmp(fwlog_param, "continuous")) {
                        wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
                } else if (!strcmp(fwlog_param, "ondemand")) {
                        wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
                } else if (!strcmp(fwlog_param, "dbgpins")) {
                        wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
                        wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
                } else if (!strcmp(fwlog_param, "disable")) {
                        wl->conf.fwlog.mem_blocks = 0;
                        wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
                } else {
                        wl1271_error("Unknown fwlog parameter %s", fwlog_param);
                }
        }

        if (bug_on_recovery != -1)
                wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;

        if (no_recovery != -1)
                wl->conf.recovery.no_recovery = (u8) no_recovery;
}

static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
                                        struct wl12xx_vif *wlvif,
                                        u8 hlid, u8 tx_pkts)
{
        bool fw_ps;

        fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);

        /*
         * Wake up from high level PS if the STA is asleep with too few
         * packets in FW or if the STA is awake.
         */
        if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_end(wl, wlvif, hlid);

        /*
         * Start high-level PS if the STA is asleep with enough blocks in FW.
         * Make an exception if this is the only connected link. In this
         * case FW-memory congestion is less of a problem.
         * Note that a single connected STA means 2*ap_count + 1 active links,
         * since we must account for the global and broadcast AP links
         * for each AP. The "fw_ps" check assures us the other link is a STA
         * connected to the AP. Otherwise the FW would not set the PSM bit.
         */
        else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
                 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

static void wl12xx_irq_update_links_status(struct wl1271 *wl,
                                           struct wl12xx_vif *wlvif,
                                           struct wl_fw_status *status)
{
        u32 cur_fw_ps_map;
        u8 hlid;

        cur_fw_ps_map = status->link_ps_bitmap;
        if (wl->ap_fw_ps_map != cur_fw_ps_map) {
                wl1271_debug(DEBUG_PSM,
                             "link ps prev 0x%x cur 0x%x changed 0x%x",
                             wl->ap_fw_ps_map, cur_fw_ps_map,
                             wl->ap_fw_ps_map ^ cur_fw_ps_map);

                wl->ap_fw_ps_map = cur_fw_ps_map;
        }

        for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
                wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
                                            wl->links[hlid].allocated_pkts);
}

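/*
 * Read and parse the firmware status block: the raw status is read over
 * the bus, converted by the lower driver, and then used to update the Tx
 * block/packet accounting, the per-link counters, the Tx watchdog and the
 * host-chipset time offset.
 */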
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
        struct wl12xx_vif *wlvif;
        struct timespec ts;
        u32 old_tx_blk_count = wl->tx_blocks_available;
        int avail, freed_blocks;
        int i;
        int ret;
        struct wl1271_link *lnk;

        ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
                                   wl->raw_fw_status,
                                   wl->fw_status_len, false);
        if (ret < 0)
                return ret;

        wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);

        wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
                     "drv_rx_counter = %d, tx_results_counter = %d)",
                     status->intr,
                     status->fw_rx_counter,
                     status->drv_rx_counter,
                     status->tx_results_counter);

        for (i = 0; i < NUM_TX_QUEUES; i++) {
                /* prevent wrap-around in freed-packets counter */
                wl->tx_allocated_pkts[i] -=
                                (status->counters.tx_released_pkts[i] -
                                 wl->tx_pkts_freed[i]) & 0xff;

                wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
        }

        for_each_set_bit(i, wl->links_map, wl->num_links) {
                u8 diff;
                lnk = &wl->links[i];

                /* prevent wrap-around in freed-packets counter */
                diff = (status->counters.tx_lnk_free_pkts[i] -
                        lnk->prev_freed_pkts) & 0xff;

                if (diff == 0)
                        continue;

                lnk->allocated_pkts -= diff;
                lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];

                /* accumulate the prev_freed_pkts counter */
                lnk->total_freed_pkts += diff;
        }

        /* prevent wrap-around in total blocks counter */
        if (likely(wl->tx_blocks_freed <= status->total_released_blks))
                freed_blocks = status->total_released_blks -
                               wl->tx_blocks_freed;
        else
                freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
                               status->total_released_blks;

        wl->tx_blocks_freed = status->total_released_blks;

        wl->tx_allocated_blocks -= freed_blocks;

        /*
         * If the FW freed some blocks:
         * If we still have allocated blocks - re-arm the timer, Tx is
         * not stuck. Otherwise, cancel the timer (no Tx currently).
         */
        if (freed_blocks) {
                if (wl->tx_allocated_blocks)
                        wl12xx_rearm_tx_watchdog_locked(wl);
                else
                        cancel_delayed_work(&wl->tx_watchdog_work);
        }

        avail = status->tx_total - wl->tx_allocated_blocks;

        /*
         * The FW might change the total number of TX memblocks before
         * we get a notification about blocks being released. Thus, the
         * available blocks calculation might yield a temporary result
         * which is lower than the actual available blocks. Keeping in
         * mind that only blocks that were allocated can be moved from
         * TX to RX, tx_blocks_available should never decrease here.
         */
        wl->tx_blocks_available = max((int)wl->tx_blocks_available,
                                      avail);

        /* if more blocks are available now, tx work can be scheduled */
        if (wl->tx_blocks_available > old_tx_blk_count)
                clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

        /* for AP update num of allocated TX blocks per link and ps status */
        wl12xx_for_each_wlvif_ap(wl, wlvif) {
                wl12xx_irq_update_links_status(wl, wlvif, status);
        }

        /* update the host-chipset time offset */
        getnstimeofday(&ts);
        wl->time_offset = (timespec_to_ns(&ts) >> 10) -
                (s64)(status->fw_localtime);

        wl->fw_fast_lnk_map = status->link_fast_bitmap;

        return 0;
}

static void wl1271_flush_deferred_work(struct wl1271 *wl)
{
        struct sk_buff *skb;

        /* Pass all received frames to the network stack */
        while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
                ieee80211_rx_ni(wl->hw, skb);

        /* Return sent skbs to the network stack */
        while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
                ieee80211_tx_status_ni(wl->hw, skb);
}

static void wl1271_netstack_work(struct work_struct *work)
{
        struct wl1271 *wl =
                container_of(work, struct wl1271, netstack_work);

        do {
                wl1271_flush_deferred_work(wl);
        } while (skb_queue_len(&wl->deferred_rx_queue));
}

#define WL1271_IRQ_MAX_LOOPS 256

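/*
 * Interrupt-handling body, called with wl->mutex held. Loops (up to
 * WL1271_IRQ_MAX_LOOPS, or just once on edge-triggered platforms) reading
 * the firmware status and dispatching data, event and watchdog interrupts.
 * A negative return value makes the caller queue a recovery.
 */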
static int wlcore_irq_locked(struct wl1271 *wl)
{
        int ret = 0;
        u32 intr;
        int loopcount = WL1271_IRQ_MAX_LOOPS;
        bool done = false;
        unsigned int defer_count;
        unsigned long flags;

        /*
         * In case edge triggered interrupt must be used, we cannot iterate
         * more than once without introducing race conditions with the hardirq.
         */
        if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
                loopcount = 1;

        wl1271_debug(DEBUG_IRQ, "IRQ work");

        if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;

        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;

        while (!done && loopcount--) {
                /*
                 * In order to avoid a race with the hardirq, clear the flag
                 * before acknowledging the chip. Since the mutex is held,
                 * wl1271_ps_elp_wakeup cannot be called concurrently.
                 */
                clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
                smp_mb__after_atomic();

                ret = wlcore_fw_status(wl, wl->fw_status);
                if (ret < 0)
                        goto out;

                wlcore_hw_tx_immediate_compl(wl);

                intr = wl->fw_status->intr;
                intr &= WLCORE_ALL_INTR_MASK;
                if (!intr) {
                        done = true;
                        continue;
                }

                if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
                        wl1271_error("HW watchdog interrupt received! starting recovery.");
                        wl->watchdog_recovery = true;
                        ret = -EIO;

                        /* restarting the chip. ignore any other interrupt. */
                        goto out;
                }

                if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
                        wl1271_error("SW watchdog interrupt received! "
                                     "starting recovery.");
                        wl->watchdog_recovery = true;
                        ret = -EIO;

                        /* restarting the chip. ignore any other interrupt. */
                        goto out;
                }

                if (likely(intr & WL1271_ACX_INTR_DATA)) {
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

                        ret = wlcore_rx(wl, wl->fw_status);
                        if (ret < 0)
                                goto out;

                        /* Check if any tx blocks were freed */
                        spin_lock_irqsave(&wl->wl_lock, flags);
                        if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
                            wl1271_tx_total_queue_count(wl) > 0) {
                                spin_unlock_irqrestore(&wl->wl_lock, flags);
                                /*
                                 * In order to avoid starvation of the TX path,
                                 * call the work function directly.
                                 */
                                ret = wlcore_tx_work_locked(wl);
                                if (ret < 0)
                                        goto out;
                        } else {
                                spin_unlock_irqrestore(&wl->wl_lock, flags);
                        }

                        /* check for tx results */
                        ret = wlcore_hw_tx_delayed_compl(wl);
                        if (ret < 0)
                                goto out;

                        /* Make sure the deferred queues don't get too long */
                        defer_count = skb_queue_len(&wl->deferred_tx_queue) +
                                      skb_queue_len(&wl->deferred_rx_queue);
                        if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
                                wl1271_flush_deferred_work(wl);
                }

                if (intr & WL1271_ACX_INTR_EVENT_A) {
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
                        ret = wl1271_event_handle(wl, 0);
                        if (ret < 0)
                                goto out;
                }

                if (intr & WL1271_ACX_INTR_EVENT_B) {
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
                        ret = wl1271_event_handle(wl, 1);
                        if (ret < 0)
                                goto out;
                }

                if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
                        wl1271_debug(DEBUG_IRQ,
                                     "WL1271_ACX_INTR_INIT_COMPLETE");

                if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
        }

        wl1271_ps_elp_sleep(wl);

out:
        return ret;
}

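/*
 * Interrupt handler (it sleeps, so it presumably runs as the threaded part
 * of the IRQ): completes a pending ELP wakeup, defers the work while the
 * device is suspended, and otherwise runs wlcore_irq_locked() under
 * wl->mutex, queueing a recovery on failure.
 */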
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
        int ret;
        unsigned long flags;
        struct wl1271 *wl = cookie;

        /* complete the ELP completion */
        spin_lock_irqsave(&wl->wl_lock, flags);
        set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
        if (wl->elp_compl) {
                complete(wl->elp_compl);
                wl->elp_compl = NULL;
        }

        if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
                /* don't enqueue a work right now. mark it as pending */
                set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
                wl1271_debug(DEBUG_IRQ, "should not enqueue work");
                disable_irq_nosync(wl->irq);
                pm_wakeup_event(wl->dev, 0);
                spin_unlock_irqrestore(&wl->wl_lock, flags);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&wl->wl_lock, flags);

        /* TX might be handled here, avoid redundant work */
        set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
        cancel_work_sync(&wl->tx_work);

        mutex_lock(&wl->mutex);

        ret = wlcore_irq_locked(wl);
        if (ret)
                wl12xx_queue_recovery_work(wl);

        spin_lock_irqsave(&wl->wl_lock, flags);
        /* In case TX was not handled here, queue TX work */
        clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
        if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
            wl1271_tx_total_queue_count(wl) > 0)
                ieee80211_queue_work(wl->hw, &wl->tx_work);
        spin_unlock_irqrestore(&wl->wl_lock, flags);

        mutex_unlock(&wl->mutex);

        return IRQ_HANDLED;
}

struct vif_counter_data {
        u8 counter;

        struct ieee80211_vif *cur_vif;
        bool cur_vif_running;
};

static void wl12xx_vif_count_iter(void *data, u8 *mac,
                                  struct ieee80211_vif *vif)
{
        struct vif_counter_data *counter = data;

        counter->counter++;
        if (counter->cur_vif == vif)
                counter->cur_vif_running = true;
}

/* caller must not hold wl->mutex, as it might deadlock */
static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *cur_vif,
                                 struct vif_counter_data *data)
{
        memset(data, 0, sizeof(*data));
        data->cur_vif = cur_vif;

        ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
                                            wl12xx_vif_count_iter, data);
}

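/*
 * Request the PLT, multi-role or single-role firmware image (chosen from
 * plt and the cached last_vif_count) and cache it in wl->fw. Does nothing
 * if an image of the same type is already loaded.
 */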
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
        const struct firmware *fw;
        const char *fw_name;
        enum wl12xx_fw_type fw_type;
        int ret;

        if (plt) {
                fw_type = WL12XX_FW_TYPE_PLT;
                fw_name = wl->plt_fw_name;
        } else {
                /*
                 * we can't call wl12xx_get_vif_count() here because
                 * wl->mutex is taken, so use the cached last_vif_count value
                 */
                if (wl->last_vif_count > 1 && wl->mr_fw_name) {
                        fw_type = WL12XX_FW_TYPE_MULTI;
                        fw_name = wl->mr_fw_name;
                } else {
                        fw_type = WL12XX_FW_TYPE_NORMAL;
                        fw_name = wl->sr_fw_name;
                }
        }

        if (wl->fw_type == fw_type)
                return 0;

        wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

        ret = request_firmware(&fw, fw_name, wl->dev);

        if (ret < 0) {
                wl1271_error("could not get firmware %s: %d", fw_name, ret);
                return ret;
        }

        if (fw->size % 4) {
                wl1271_error("firmware size is not multiple of 32 bits: %zu",
                             fw->size);
                ret = -EILSEQ;
                goto out;
        }

        vfree(wl->fw);
        wl->fw_type = WL12XX_FW_TYPE_NONE;
        wl->fw_len = fw->size;
        wl->fw = vmalloc(wl->fw_len);

        if (!wl->fw) {
                wl1271_error("could not allocate memory for the firmware");
                ret = -ENOMEM;
                goto out;
        }

        memcpy(wl->fw, fw->data, wl->fw_len);
        ret = 0;
        wl->fw_type = fw_type;
out:
        release_firmware(fw);

        return ret;
}

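/*
 * Schedule the recovery worker. Only valid while the chip is ON; the state
 * is moved to RESTARTING first so a second recovery is not queued
 * recursively, and further interrupts are masked until the worker reboots
 * the chip.
 */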
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
        /* Avoid a recursive recovery */
        if (wl->state == WLCORE_STATE_ON) {
                WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
                                  &wl->flags));

                wl->state = WLCORE_STATE_RESTARTING;
                set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
                wl1271_ps_elp_wakeup(wl);
                wlcore_disable_interrupts_nosync(wl);
                ieee80211_queue_work(wl->hw, &wl->recovery_work);
        }
}

size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
        size_t len;

        /* Make sure we have enough room */
        len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);

        /* Fill the FW log file, consumed by the sysfs fwlog entry */
        memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
        wl->fwlog_size += len;

        return len;
}

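/*
 * Dump the firmware panic log: walk the linked list of FW memory blocks
 * (re-pointing the partition window block by block) and append their
 * contents to the fwlog buffer exposed through sysfs.
 */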
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
        struct wlcore_partition_set part, old_part;
        u32 addr;
        u32 offset;
        u32 end_of_log;
        u8 *block;
        int ret;

        if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
            (wl->conf.fwlog.mem_blocks == 0))
                return;

        wl1271_info("Reading FW panic log");

        block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
        if (!block)
                return;

        /*
         * Make sure the chip is awake and the logger isn't active.
         * Do not send a stop fwlog command if the fw is hung or if
         * dbgpins are used (due to some fw bug).
         */
        if (wl1271_ps_elp_wakeup(wl))
                goto out;
        if (!wl->watchdog_recovery &&
            wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
                wl12xx_cmd_stop_fwlog(wl);

        /* Read the first memory block address */
        ret = wlcore_fw_status(wl, wl->fw_status);
        if (ret < 0)
                goto out;

        addr = wl->fw_status->log_start_addr;
        if (!addr)
                goto out;

        if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
                offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
                end_of_log = wl->fwlog_end;
        } else {
                offset = sizeof(addr);
                end_of_log = addr;
        }

        old_part = wl->curr_part;
        memset(&part, 0, sizeof(part));

        /* Traverse the memory blocks linked list */
        do {
                part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
                part.mem.size = PAGE_SIZE;

                ret = wlcore_set_partition(wl, &part);
                if (ret < 0) {
                        wl1271_error("%s: set_partition start=0x%X size=%d",
                                     __func__, part.mem.start, part.mem.size);
                        goto out;
                }

                memset(block, 0, wl->fw_mem_block_size);
                ret = wlcore_read_hwaddr(wl, addr, block,
                                         wl->fw_mem_block_size, false);

                if (ret < 0)
                        goto out;

                /*
                 * Memory blocks are linked to one another. The first 4 bytes
                 * of each memory block hold the hardware address of the next
                 * one. The last memory block points to the first one in
                 * on demand mode and is equal to 0x2000000 in continuous mode.
                 */
                addr = le32_to_cpup((__le32 *)block);

                if (!wl12xx_copy_fwlog(wl, block + offset,
                                       wl->fw_mem_block_size - offset))
                        break;
        } while (addr && (addr != end_of_log));

        wake_up_interruptible(&wl->fwlog_waitq);

out:
        kfree(block);
        wlcore_set_partition(wl, &old_part);
}

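/*
 * Save the per-link total_freed_pkts counter into the station's private
 * data (it appears to seed the Tx sequence number later on); on recovery
 * the counter is padded, with a larger padding for GEM ciphers, to cover
 * packets whose completions never made it into the FW status.
 */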
static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                   u8 hlid, struct ieee80211_sta *sta)
{
        struct wl1271_station *wl_sta;
        u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;

        wl_sta = (void *)sta->drv_priv;
        wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

        /*
         * increment the initial seq number on recovery to account for
         * transmitted packets that we haven't yet got in the FW status
         */
        if (wlvif->encryption_type == KEY_GEM)
                sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;

        if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
                wl_sta->total_freed_pkts += sqn_recovery_padding;
}

static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
                                        struct wl12xx_vif *wlvif,
                                        u8 hlid, const u8 *addr)
{
        struct ieee80211_sta *sta;
        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

        if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
                    is_zero_ether_addr(addr)))
                return;

        rcu_read_lock();
        sta = ieee80211_find_sta(vif, addr);
        if (sta)
                wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
        rcu_read_unlock();
}

static void wlcore_print_recovery(struct wl1271 *wl)
{
        u32 pc = 0;
        u32 hint_sts = 0;
        int ret;

        wl1271_info("Hardware recovery in progress. FW ver: %s",
                    wl->chip.fw_ver_str);

        /* change partitions momentarily so we can read the FW pc */
        ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
        if (ret < 0)
                return;

        ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
        if (ret < 0)
                return;

        ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
        if (ret < 0)
                return;

        wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
                    pc, hint_sts, ++wl->recovery_count);

        wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}

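/*
 * Recovery worker: dump the panic log and recovery info (unless the
 * recovery was intended), optionally BUG() or bail out according to the
 * recovery config, save the freed-packets counters of associated STA vifs,
 * tear down all vifs, stop the core and ask mac80211 to restart the
 * hardware.
 */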
static void wl1271_recovery_work(struct work_struct *work)
{
        struct wl1271 *wl =
                container_of(work, struct wl1271, recovery_work);
        struct wl12xx_vif *wlvif;
        struct ieee80211_vif *vif;

        mutex_lock(&wl->mutex);

        if (wl->state == WLCORE_STATE_OFF || wl->plt)
                goto out_unlock;

        if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
                if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
                        wl12xx_read_fwlog_panic(wl);
                wlcore_print_recovery(wl);
        }

        BUG_ON(wl->conf.recovery.bug_on_recovery &&
               !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

        if (wl->conf.recovery.no_recovery) {
                wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
                goto out_unlock;
        }

        /* Prevent spurious TX during FW restart */
        wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

        /* reboot the chipset */
        while (!list_empty(&wl->wlvif_list)) {
                wlvif = list_first_entry(&wl->wlvif_list,
                                         struct wl12xx_vif, list);
                vif = wl12xx_wlvif_to_vif(wlvif);

                if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
                    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
                        wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
                                                    vif->bss_conf.bssid);
                }

                __wl1271_op_remove_interface(wl, vif, false);
        }

        wlcore_op_stop_locked(wl);

        ieee80211_restart_hw(wl->hw);

        /*
         * It's safe to enable TX now - the queues are stopped after a request
         * to restart the HW.
         */
        wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
        wl->watchdog_recovery = false;
        clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
        mutex_unlock(&wl->mutex);
}

static int wlcore_fw_wakeup(struct wl1271 *wl)
{
        return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}

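/* Allocate the raw/parsed FW status and the Tx result buffers. */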
static int wl1271_setup(struct wl1271 *wl)
{
        wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
        if (!wl->raw_fw_status)
                goto err;

        wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
        if (!wl->fw_status)
                goto err;

        wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
        if (!wl->tx_res_if)
                goto err;

        return 0;
err:
        kfree(wl->fw_status);
        kfree(wl->raw_fw_status);
        return -ENOMEM;
}

static int wl12xx_set_power_on(struct wl1271 *wl)
{
        int ret;

        msleep(WL1271_PRE_POWER_ON_SLEEP);
        ret = wl1271_power_on(wl);
        if (ret < 0)
                goto out;
        msleep(WL1271_POWER_ON_SLEEP);
        wl1271_io_reset(wl);
        wl1271_io_init(wl);

        ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
        if (ret < 0)
                goto fail;

        /* ELP module wake up */
        ret = wlcore_fw_wakeup(wl);
        if (ret < 0)
                goto fail;

out:
        return ret;

fail:
        wl1271_power_off(wl);
        return ret;
}

static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
        int ret = 0;

        ret = wl12xx_set_power_on(wl);
        if (ret < 0)
                goto out;

        /*
         * For wl127x based devices we could use the default block
         * size (512 bytes), but due to a bug in the sdio driver, we
         * need to set it explicitly after the chip is powered on. To
         * simplify the code and since the performance impact is
         * negligible, we use the same block size for all different
         * chip types.
         *
         * Check if the bus supports blocksize alignment and, if it
         * doesn't, make sure we don't have the quirk.
         */
        if (!wl1271_set_block_size(wl))
                wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

        /* TODO: make sure the lower driver has set things up correctly */

        ret = wl1271_setup(wl);
        if (ret < 0)
                goto out;

        ret = wl12xx_fetch_firmware(wl, plt);
        if (ret < 0)
                goto out;

out:
        return ret;
}

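/*
 * Enter PLT (production-line test) mode: power the chip on, boot the PLT
 * firmware (skipping plt_init for PLT_CHIP_AWAKE) and report the firmware
 * version, retrying the boot up to WL1271_BOOT_RETRIES times.
 */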
Yair Shapira7019c802012-07-11 18:48:04 +03001119int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001120{
Juuso Oikarinen9ccd9212009-12-11 15:41:01 +02001121 int retries = WL1271_BOOT_RETRIES;
Gery Kahn6f07b722011-07-18 14:21:49 +03001122 struct wiphy *wiphy = wl->hw->wiphy;
Yair Shapira7019c802012-07-11 18:48:04 +03001123
1124 static const char* const PLT_MODE[] = {
1125 "PLT_OFF",
1126 "PLT_ON",
Yair Shapiradd491ff2013-09-17 18:41:21 +03001127 "PLT_FEM_DETECT",
1128 "PLT_CHIP_AWAKE"
Yair Shapira7019c802012-07-11 18:48:04 +03001129 };
1130
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001131 int ret;
1132
1133 mutex_lock(&wl->mutex);
1134
1135 wl1271_notice("power up");
1136
Ido Yariv4cc53382012-07-24 19:18:49 +03001137 if (wl->state != WLCORE_STATE_OFF) {
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001138 wl1271_error("cannot go into PLT state because not "
1139 "in off state: %d", wl->state);
1140 ret = -EBUSY;
1141 goto out;
1142 }
1143
Yair Shapira7019c802012-07-11 18:48:04 +03001144 /* Indicate to lower levels that we are now in PLT mode */
1145 wl->plt = true;
1146 wl->plt_mode = plt_mode;
1147
Juuso Oikarinen9ccd9212009-12-11 15:41:01 +02001148 while (retries) {
1149 retries--;
Eliad Peller3fcdab72012-02-06 12:47:54 +02001150 ret = wl12xx_chip_wakeup(wl, true);
Juuso Oikarinen9ccd9212009-12-11 15:41:01 +02001151 if (ret < 0)
1152 goto power_off;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001153
Yair Shapiradd491ff2013-09-17 18:41:21 +03001154 if (plt_mode != PLT_CHIP_AWAKE) {
1155 ret = wl->ops->plt_init(wl);
1156 if (ret < 0)
1157 goto power_off;
1158 }
Juuso Oikarinen9ccd9212009-12-11 15:41:01 +02001159
Ido Yariv4cc53382012-07-24 19:18:49 +03001160 wl->state = WLCORE_STATE_ON;
Yair Shapira7019c802012-07-11 18:48:04 +03001161 wl1271_notice("firmware booted in PLT mode %s (%s)",
1162 PLT_MODE[plt_mode],
Levi, Shahar4b7fac72011-01-23 07:27:22 +01001163 wl->chip.fw_ver_str);
Luciano Coelhoe7ddf542011-03-10 15:24:57 +02001164
Gery Kahn6f07b722011-07-18 14:21:49 +03001165 /* update hw/fw version info in wiphy struct */
1166 wiphy->hw_version = wl->chip.id;
1167 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1168 sizeof(wiphy->fw_version));
1169
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001170 goto out;
1171
Juuso Oikarinen9ccd9212009-12-11 15:41:01 +02001172power_off:
1173 wl1271_power_off(wl);
1174 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001175
Yair Shapira7019c802012-07-11 18:48:04 +03001176 wl->plt = false;
1177 wl->plt_mode = PLT_OFF;
1178
Juuso Oikarinen9ccd9212009-12-11 15:41:01 +02001179 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1180 WL1271_BOOT_RETRIES);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001181out:
1182 mutex_unlock(&wl->mutex);
1183
1184 return ret;
1185}
1186
Ido Yarivf3df1332012-01-11 09:42:39 +02001187int wl1271_plt_stop(struct wl1271 *wl)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001188{
1189 int ret = 0;
1190
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001191 wl1271_notice("power down");
1192
Ido Yariv46b0cc92012-01-11 09:42:41 +02001193 /*
1194 * Interrupts must be disabled before setting the state to OFF.
1195 * Otherwise, the interrupt handler might be called and exit without
1196 * reading the interrupt status.
1197 */
Luciano Coelhodd5512e2012-04-11 11:03:14 +03001198 wlcore_disable_interrupts(wl);
Ido Yarivf3df1332012-01-11 09:42:39 +02001199 mutex_lock(&wl->mutex);
Eliad Peller3fcdab72012-02-06 12:47:54 +02001200 if (!wl->plt) {
Ido Yarivf3df1332012-01-11 09:42:39 +02001201 mutex_unlock(&wl->mutex);
Ido Yariv46b0cc92012-01-11 09:42:41 +02001202
1203 /*
1204 * This will not necessarily enable interrupts as interrupts
1205 * may have been disabled when op_stop was called. It will,
1206 * however, balance the above call to disable_interrupts().
1207 */
Luciano Coelhodd5512e2012-04-11 11:03:14 +03001208 wlcore_enable_interrupts(wl);
Ido Yariv46b0cc92012-01-11 09:42:41 +02001209
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001210 wl1271_error("cannot power down because not in PLT "
1211 "state: %d", wl->state);
1212 ret = -EBUSY;
1213 goto out;
1214 }
1215
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001216 mutex_unlock(&wl->mutex);
Ido Yarivf3df1332012-01-11 09:42:39 +02001217
Ido Yariva6208652011-03-01 15:14:41 +02001218 wl1271_flush_deferred_work(wl);
1219 cancel_work_sync(&wl->netstack_work);
Juuso Oikarinen52b0e7a2010-09-21 06:23:31 +02001220 cancel_work_sync(&wl->recovery_work);
Luciano Coelhof6fbecc2012-01-11 09:42:42 +02001221 cancel_delayed_work_sync(&wl->elp_work);
Arik Nemtsov55df5af2012-03-03 22:18:00 +02001222 cancel_delayed_work_sync(&wl->tx_watchdog_work);
Ido Yariva4549692012-01-11 09:42:40 +02001223
1224 mutex_lock(&wl->mutex);
1225 wl1271_power_off(wl);
Luciano Coelhof6fbecc2012-01-11 09:42:42 +02001226 wl->flags = 0;
Arik Nemtsov2f18cf72012-06-10 19:10:45 +03001227 wl->sleep_auth = WL1271_PSM_ILLEGAL;
Ido Yariv4cc53382012-07-24 19:18:49 +03001228 wl->state = WLCORE_STATE_OFF;
Eliad Peller3fcdab72012-02-06 12:47:54 +02001229 wl->plt = false;
Yair Shapira7019c802012-07-11 18:48:04 +03001230 wl->plt_mode = PLT_OFF;
Luciano Coelhof6fbecc2012-01-11 09:42:42 +02001231 wl->rx_counter = 0;
Ido Yariva4549692012-01-11 09:42:40 +02001232 mutex_unlock(&wl->mutex);
1233
Juuso Oikarinen4ae3fa82011-01-14 12:48:46 +01001234out:
1235 return ret;
1236}
Juuso Oikarinen8c7f4f32010-09-21 06:23:29 +02001237
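/*
 * mac80211 .tx entry point: map the skb to its vif link (hlid) and AC
 * queue, drop it if the link is invalid or the queue is hard-stopped,
 * otherwise append it to the per-link queue under wl_lock and kick
 * tx_work. A per-vif high watermark soft-stops the mac80211 queue
 * (WLCORE_QUEUE_STOP_REASON_WATERMARK) until the queue drains.
 */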
Thomas Huehn36323f82012-07-23 21:33:42 +02001238static void wl1271_op_tx(struct ieee80211_hw *hw,
1239 struct ieee80211_tx_control *control,
1240 struct sk_buff *skb)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001241{
1242 struct wl1271 *wl = hw->priv;
Eliad Pellera8ab39a2011-10-05 11:55:54 +02001243 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1244 struct ieee80211_vif *vif = info->control.vif;
Eliad Peller0f168012011-10-11 13:52:25 +02001245 struct wl12xx_vif *wlvif = NULL;
Juuso Oikarinen830fb672009-12-11 15:41:06 +02001246 unsigned long flags;
Arik Nemtsov708bb3c2011-06-24 13:03:37 +03001247 int q, mapping;
Eliad Pellerd6a3cc22011-10-10 10:12:51 +02001248 u8 hlid;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001249
Arik Nemtsovf4d02002012-11-28 11:42:33 +02001250 if (!vif) {
1251 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1252 ieee80211_free_txskb(hw, skb);
1253 return;
1254 }
Eliad Peller0f168012011-10-11 13:52:25 +02001255
Arik Nemtsovf4d02002012-11-28 11:42:33 +02001256 wlvif = wl12xx_vif_to_data(vif);
Arik Nemtsov708bb3c2011-06-24 13:03:37 +03001257 mapping = skb_get_queue_mapping(skb);
1258 q = wl1271_tx_get_queue(mapping);
Ido Yarivb07d4032011-03-01 15:14:43 +02001259
Thomas Huehn36323f82012-07-23 21:33:42 +02001260 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
Ido Yarivb07d4032011-03-01 15:14:43 +02001261
Juuso Oikarinen830fb672009-12-11 15:41:06 +02001262 spin_lock_irqsave(&wl->wl_lock, flags);
Ido Yarivb07d4032011-03-01 15:14:43 +02001263
Arik Nemtsov66396112012-05-18 07:46:38 +03001264 /*
1265 * drop the packet if the link is invalid or the queue is stopped
1266 * for any reason but watermark. Watermark is a "soft"-stop so we
1267 * allow these packets through.
1268 */
Eliad Pellerd6a3cc22011-10-10 10:12:51 +02001269 if (hlid == WL12XX_INVALID_LINK_ID ||
Arik Nemtsovf4d02002012-11-28 11:42:33 +02001270 (!test_bit(hlid, wlvif->links_map)) ||
Arik Nemtsovd6037d22012-11-28 11:42:44 +02001271 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1272 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
Arik Nemtsov66396112012-05-18 07:46:38 +03001273 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
Eliad Pellerd6a3cc22011-10-10 10:12:51 +02001274 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
Eliad Peller5de8eef2011-12-13 15:26:38 +02001275 ieee80211_free_txskb(hw, skb);
Eliad Pellerd6a3cc22011-10-10 10:12:51 +02001276 goto out;
Arik Nemtsova8c0ddb2011-02-23 00:22:26 +02001277 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001278
Eliad Peller8ccd16e2012-03-04 10:55:55 +02001279 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1280 hlid, q, skb->len);
Eliad Pellerd6a3cc22011-10-10 10:12:51 +02001281 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1282
Arik Nemtsov04b4d692011-08-14 13:17:39 +03001283 wl->tx_queue_count[q]++;
Arik Nemtsovf4d02002012-11-28 11:42:33 +02001284 wlvif->tx_queue_count[q]++;
Arik Nemtsov04b4d692011-08-14 13:17:39 +03001285
1286 /*
1287	 * The workqueue is slow to process the tx_queue and we need to stop
1288 * the queue here, otherwise the queue will get too long.
1289 */
Arik Nemtsov1c33db72012-11-30 00:48:03 +02001290 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
Arik Nemtsovd6037d22012-11-28 11:42:44 +02001291 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
Arik Nemtsov8cdc44a2012-06-25 22:26:17 +03001292 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
Arik Nemtsov04b4d692011-08-14 13:17:39 +03001293 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
Arik Nemtsov1c33db72012-11-30 00:48:03 +02001294 wlcore_stop_queue_locked(wl, wlvif, q,
Arik Nemtsov66396112012-05-18 07:46:38 +03001295 WLCORE_QUEUE_STOP_REASON_WATERMARK);
Arik Nemtsov04b4d692011-08-14 13:17:39 +03001296 }
1297
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001298 /*
1299	 * The chip-specific setup must run before the first TX packet -
1300 * before that, the tx_work will not be initialized!
1301 */
1302
Ido Yarivb07d4032011-03-01 15:14:43 +02001303 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1304 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
Ido Yariva5225502010-10-12 14:49:10 +02001305 ieee80211_queue_work(wl->hw, &wl->tx_work);
Ido Yarivb07d4032011-03-01 15:14:43 +02001306
Arik Nemtsov04216da2011-08-14 13:17:38 +03001307out:
Ido Yarivb07d4032011-03-01 15:14:43 +02001308 spin_unlock_irqrestore(&wl->wl_lock, flags);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001309}
1310
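/*
 * Queue the preallocated dummy packet. The FW requests it when it is
 * low on RX memory blocks; if the FW TX path is not busy it is pushed
 * out immediately, otherwise the threaded IRQ handler will send it.
 */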
Shahar Leviae47c452011-03-06 16:32:14 +02001311int wl1271_tx_dummy_packet(struct wl1271 *wl)
1312{
Ido Yariv990f5de2011-03-31 10:06:59 +02001313 unsigned long flags;
Arik Nemtsov14623782011-08-28 15:11:57 +03001314 int q;
1315
1316 /* no need to queue a new dummy packet if one is already pending */
1317 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1318 return 0;
1319
1320 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
Shahar Leviae47c452011-03-06 16:32:14 +02001321
Ido Yariv990f5de2011-03-31 10:06:59 +02001322 spin_lock_irqsave(&wl->wl_lock, flags);
1323 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
Arik Nemtsovf1a46382011-07-07 14:25:23 +03001324 wl->tx_queue_count[q]++;
Ido Yariv990f5de2011-03-31 10:06:59 +02001325 spin_unlock_irqrestore(&wl->wl_lock, flags);
1326
1327 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1328 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
Ido Yariveb96f842012-06-18 13:21:55 +03001329 return wlcore_tx_work_locked(wl);
Ido Yariv990f5de2011-03-31 10:06:59 +02001330
1331 /*
1332 * If the FW TX is busy, TX work will be scheduled by the threaded
1333 * interrupt handler function
1334 */
1335 return 0;
1336}
1337
1338/*
1339 * The size of the dummy packet should be at least 1400 bytes. However, in
1340	 * order to minimize the number of bus transactions, aligning it to 512-byte
1341	 * boundaries could be beneficial, performance-wise.
1342 */
1343#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1344
Luciano Coelhocf27d862011-04-01 21:08:23 +03001345static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
Ido Yariv990f5de2011-03-31 10:06:59 +02001346{
1347 struct sk_buff *skb;
1348 struct ieee80211_hdr_3addr *hdr;
1349 unsigned int dummy_packet_size;
1350
1351 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1352 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1353
1354 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
Shahar Leviae47c452011-03-06 16:32:14 +02001355 if (!skb) {
Ido Yariv990f5de2011-03-31 10:06:59 +02001356 wl1271_warning("Failed to allocate a dummy packet skb");
1357 return NULL;
Shahar Leviae47c452011-03-06 16:32:14 +02001358 }
1359
1360 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1361
1362 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1363 memset(hdr, 0, sizeof(*hdr));
1364 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
Ido Yariv990f5de2011-03-31 10:06:59 +02001365 IEEE80211_STYPE_NULLFUNC |
1366 IEEE80211_FCTL_TODS);
Shahar Leviae47c452011-03-06 16:32:14 +02001367
Ido Yariv990f5de2011-03-31 10:06:59 +02001368 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
Shahar Leviae47c452011-03-06 16:32:14 +02001369
Luciano Coelho18b92ff2011-03-21 16:35:21 +02001370 /* Dummy packets require the TID to be management */
1371 skb->priority = WL1271_TID_MGMT;
Ido Yariv990f5de2011-03-31 10:06:59 +02001372
1373 /* Initialize all fields that might be used */
Hauke Mehrtens86c438f2011-04-26 23:27:44 +02001374 skb_set_queue_mapping(skb, 0);
Ido Yariv990f5de2011-03-31 10:06:59 +02001375 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
Shahar Leviae47c452011-03-06 16:32:14 +02001376
Ido Yariv990f5de2011-03-31 10:06:59 +02001377 return skb;
Shahar Leviae47c452011-03-06 16:32:14 +02001378}
1379
Ido Yariv990f5de2011-03-31 10:06:59 +02001380
Luciano Coelhof634a4e2011-05-18 16:51:26 -04001381#ifdef CONFIG_PM
Luciano Coelho22479972012-05-16 06:00:00 +03001382static int
Amitkumar Karwar50ac6602013-06-25 19:03:56 -07001383wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001384{
1385 int num_fields = 0, in_field = 0, fields_size = 0;
1386 int i, pattern_len = 0;
1387
1388 if (!p->mask) {
1389 wl1271_warning("No mask in WoWLAN pattern");
1390 return -EINVAL;
1391 }
1392
1393 /*
1394 * The pattern is broken up into segments of bytes at different offsets
1395 * that need to be checked by the FW filter. Each segment is called
1396	 * a field in the FW API. We verify that the total number of fields
1397	 * required for this pattern won't exceed the FW limit (8),
1398	 * and that the total fields buffer won't exceed the FW limit either.
1399	 * Note that if a pattern crosses the Ethernet/IP header
1400	 * boundary, a new field is required.
1401 */
1402 for (i = 0; i < p->pattern_len; i++) {
1403 if (test_bit(i, (unsigned long *)p->mask)) {
1404 if (!in_field) {
1405 in_field = 1;
1406 pattern_len = 1;
1407 } else {
1408 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1409 num_fields++;
1410 fields_size += pattern_len +
1411 RX_FILTER_FIELD_OVERHEAD;
1412 pattern_len = 1;
1413 } else
1414 pattern_len++;
1415 }
1416 } else {
1417 if (in_field) {
1418 in_field = 0;
1419 fields_size += pattern_len +
1420 RX_FILTER_FIELD_OVERHEAD;
1421 num_fields++;
1422 }
1423 }
1424 }
1425
1426 if (in_field) {
1427 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1428 num_fields++;
1429 }
1430
1431 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1432 wl1271_warning("RX Filter too complex. Too many segments");
1433 return -EINVAL;
1434 }
1435
1436 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1437 wl1271_warning("RX filter pattern is too big");
1438 return -E2BIG;
1439 }
1440
1441 return 0;
1442}
1443
Eyal Shapiraa6eab0c2012-03-14 06:32:07 +02001444struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1445{
1446 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1447}
1448
1449void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1450{
1451 int i;
1452
1453 if (filter == NULL)
1454 return;
1455
1456 for (i = 0; i < filter->num_fields; i++)
1457 kfree(filter->fields[i].pattern);
1458
1459 kfree(filter);
1460}
1461
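/*
 * Append one field (offset/flags/pattern) to an RX filter, copying the
 * pattern bytes. Fails once WL1271_RX_FILTER_MAX_FIELDS is reached.
 */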
1462int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1463 u16 offset, u8 flags,
Johannes Berg922bd802014-05-19 17:59:50 +02001464 const u8 *pattern, u8 len)
Eyal Shapiraa6eab0c2012-03-14 06:32:07 +02001465{
1466 struct wl12xx_rx_filter_field *field;
1467
1468 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1469 wl1271_warning("Max fields per RX filter. can't alloc another");
1470 return -EINVAL;
1471 }
1472
1473 field = &filter->fields[filter->num_fields];
1474
1475 field->pattern = kzalloc(len, GFP_KERNEL);
1476 if (!field->pattern) {
1477 wl1271_warning("Failed to allocate RX filter pattern");
1478 return -ENOMEM;
1479 }
1480
1481 filter->num_fields++;
1482
1483 field->offset = cpu_to_le16(offset);
1484 field->flags = flags;
1485 field->len = len;
1486 memcpy(field->pattern, pattern, len);
1487
1488 return 0;
1489}
1490
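/*
 * Size of the filter fields once flattened for the FW: each field is
 * serialized as the field struct minus the pattern pointer, followed
 * by the pattern bytes themselves.
 */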
1491int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1492{
1493 int i, fields_size = 0;
1494
1495 for (i = 0; i < filter->num_fields; i++)
1496 fields_size += filter->fields[i].len +
1497 sizeof(struct wl12xx_rx_filter_field) -
1498 sizeof(u8 *);
1499
1500 return fields_size;
1501}
1502
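/*
 * Serialize the filter fields into a contiguous buffer in the layout
 * the FW expects: each field header is followed by its pattern bytes,
 * which take the place of the pattern pointer.
 */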
1503void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1504 u8 *buf)
1505{
1506 int i;
1507 struct wl12xx_rx_filter_field *field;
1508
1509 for (i = 0; i < filter->num_fields; i++) {
1510 field = (struct wl12xx_rx_filter_field *)buf;
1511
1512 field->offset = filter->fields[i].offset;
1513 field->flags = filter->fields[i].flags;
1514 field->len = filter->fields[i].len;
1515
1516 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1517 buf += sizeof(struct wl12xx_rx_filter_field) -
1518 sizeof(u8 *) + field->len;
1519 }
1520}
1521
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001522/*
1523 * Allocates an RX filter, returned through f,
1524 * which needs to be freed using rx_filter_free()
1525 */
Amitkumar Karwar50ac6602013-06-25 19:03:56 -07001526static int
1527wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1528 struct wl12xx_rx_filter **f)
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001529{
1530 int i, j, ret = 0;
1531 struct wl12xx_rx_filter *filter;
1532 u16 offset;
1533 u8 flags, len;
1534
1535 filter = wl1271_rx_filter_alloc();
1536 if (!filter) {
1537 wl1271_warning("Failed to alloc rx filter");
1538 ret = -ENOMEM;
1539 goto err;
1540 }
1541
1542 i = 0;
1543 while (i < p->pattern_len) {
1544 if (!test_bit(i, (unsigned long *)p->mask)) {
1545 i++;
1546 continue;
1547 }
1548
1549 for (j = i; j < p->pattern_len; j++) {
1550 if (!test_bit(j, (unsigned long *)p->mask))
1551 break;
1552
1553 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1554 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1555 break;
1556 }
1557
1558 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1559 offset = i;
1560 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1561 } else {
1562 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1563 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1564 }
1565
1566 len = j - i;
1567
1568 ret = wl1271_rx_filter_alloc_field(filter,
1569 offset,
1570 flags,
1571 &p->pattern[i], len);
1572 if (ret)
1573 goto err;
1574
1575 i = j;
1576 }
1577
1578 filter->action = FILTER_SIGNAL;
1579
1580 *f = filter;
1581 return 0;
1582
1583err:
1584 wl1271_rx_filter_free(filter);
1585 *f = NULL;
1586
1587 return ret;
1588}
1589
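/*
 * Program the FW RX filters from the cfg80211 WoWLAN configuration.
 * With no patterns (or "any"), restore the default: forward all
 * traffic and clear the filters. Otherwise each pattern becomes one
 * RX filter and the default action is set to drop, so only matching
 * frames wake the host.
 */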
1590static int wl1271_configure_wowlan(struct wl1271 *wl,
1591 struct cfg80211_wowlan *wow)
1592{
1593 int i, ret;
1594
1595 if (!wow || wow->any || !wow->n_patterns) {
Arik Nemtsovc439a1c2012-06-21 18:10:50 +03001596 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1597 FILTER_SIGNAL);
1598 if (ret)
1599 goto out;
1600
1601 ret = wl1271_rx_filter_clear_all(wl);
1602 if (ret)
1603 goto out;
1604
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001605 return 0;
1606 }
1607
1608 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1609 return -EINVAL;
1610
1611 /* Validate all incoming patterns before clearing current FW state */
1612 for (i = 0; i < wow->n_patterns; i++) {
1613 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1614 if (ret) {
1615 wl1271_warning("Bad wowlan pattern %d", i);
1616 return ret;
1617 }
1618 }
1619
Arik Nemtsovc439a1c2012-06-21 18:10:50 +03001620 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1621 if (ret)
1622 goto out;
1623
1624 ret = wl1271_rx_filter_clear_all(wl);
1625 if (ret)
1626 goto out;
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001627
1628 /* Translate WoWLAN patterns into filters */
1629 for (i = 0; i < wow->n_patterns; i++) {
Amitkumar Karwar50ac6602013-06-25 19:03:56 -07001630 struct cfg80211_pkt_pattern *p;
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001631 struct wl12xx_rx_filter *filter = NULL;
1632
1633 p = &wow->patterns[i];
1634
1635 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1636 if (ret) {
1637 wl1271_warning("Failed to create an RX filter from "
1638 "wowlan pattern %d", i);
1639 goto out;
1640 }
1641
1642 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1643
1644 wl1271_rx_filter_free(filter);
1645 if (ret)
1646 goto out;
1647 }
1648
1649 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1650
1651out:
1652 return ret;
1653}
1654
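/*
 * Suspend-time configuration for an associated STA vif: program the
 * WoWLAN RX filters and, if they differ from the normal settings,
 * the suspend wake-up conditions and listen interval.
 */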
Eyal Shapiradae728f2012-02-02 12:03:39 +02001655static int wl1271_configure_suspend_sta(struct wl1271 *wl,
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001656 struct wl12xx_vif *wlvif,
1657 struct cfg80211_wowlan *wow)
Eyal Shapiradae728f2012-02-02 12:03:39 +02001658{
1659 int ret = 0;
1660
Eyal Shapiradae728f2012-02-02 12:03:39 +02001661 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001662 goto out;
Eyal Shapiradae728f2012-02-02 12:03:39 +02001663
1664 ret = wl1271_ps_elp_wakeup(wl);
1665 if (ret < 0)
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001666 goto out;
Eyal Shapiradae728f2012-02-02 12:03:39 +02001667
Arik Nemtsovc439a1c2012-06-21 18:10:50 +03001668 ret = wl1271_configure_wowlan(wl, wow);
1669 if (ret < 0)
1670 goto out_sleep;
1671
Eyal Shapira11bc97e2012-08-02 07:15:19 +03001672 if ((wl->conf.conn.suspend_wake_up_event ==
1673 wl->conf.conn.wake_up_event) &&
1674 (wl->conf.conn.suspend_listen_interval ==
1675 wl->conf.conn.listen_interval))
1676 goto out_sleep;
1677
Eyal Shapiradae728f2012-02-02 12:03:39 +02001678 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1679 wl->conf.conn.suspend_wake_up_event,
1680 wl->conf.conn.suspend_listen_interval);
1681
1682 if (ret < 0)
1683 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1684
Arik Nemtsovc439a1c2012-06-21 18:10:50 +03001685out_sleep:
Eyal Shapiradae728f2012-02-02 12:03:39 +02001686 wl1271_ps_elp_sleep(wl);
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001687out:
Eyal Shapiradae728f2012-02-02 12:03:39 +02001688 return ret;
1689
1690}
Eliad Peller94390642011-05-13 11:57:13 +03001691
Eliad Peller0603d892011-10-05 11:55:51 +02001692static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1693 struct wl12xx_vif *wlvif)
Eliad Peller94390642011-05-13 11:57:13 +03001694{
Eliad Pellere85d1622011-06-27 13:06:43 +03001695 int ret = 0;
Eliad Peller94390642011-05-13 11:57:13 +03001696
Eliad Peller53d40d02011-10-10 10:13:02 +02001697 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001698 goto out;
Eliad Pellere85d1622011-06-27 13:06:43 +03001699
Eliad Peller8a7cf3f2011-06-06 12:21:54 +03001700 ret = wl1271_ps_elp_wakeup(wl);
1701 if (ret < 0)
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001702 goto out;
Eliad Peller8a7cf3f2011-06-06 12:21:54 +03001703
Eliad Peller0603d892011-10-05 11:55:51 +02001704 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
Eliad Peller8a7cf3f2011-06-06 12:21:54 +03001705
1706 wl1271_ps_elp_sleep(wl);
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001707out:
Eliad Peller8a7cf3f2011-06-06 12:21:54 +03001708 return ret;
1709
1710}
1711
Eliad Pellerd2d66c52011-10-05 11:55:43 +02001712static int wl1271_configure_suspend(struct wl1271 *wl,
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001713 struct wl12xx_vif *wlvif,
1714 struct cfg80211_wowlan *wow)
Eliad Peller8a7cf3f2011-06-06 12:21:54 +03001715{
Eyal Shapiradae728f2012-02-02 12:03:39 +02001716 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001717 return wl1271_configure_suspend_sta(wl, wlvif, wow);
Eliad Peller536129c2011-10-05 11:55:45 +02001718 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
Eliad Peller0603d892011-10-05 11:55:51 +02001719 return wl1271_configure_suspend_ap(wl, wlvif);
Eliad Peller8a7cf3f2011-06-06 12:21:54 +03001720 return 0;
1721}
1722
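/*
 * Undo wl1271_configure_suspend(): for STA, clear the WoWLAN filters
 * and restore the normal wake-up conditions; for AP, disable the
 * beacon filtering that was enabled on suspend.
 */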
Luciano Coelho8f6ac532013-05-04 01:06:11 +03001723static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
Eliad Peller8a7cf3f2011-06-06 12:21:54 +03001724{
Eyal Shapiradae728f2012-02-02 12:03:39 +02001725 int ret = 0;
Eliad Peller536129c2011-10-05 11:55:45 +02001726 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
Eyal Shapiradae728f2012-02-02 12:03:39 +02001727 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
Eliad Peller8a7cf3f2011-06-06 12:21:54 +03001728
Eyal Shapiradae728f2012-02-02 12:03:39 +02001729 if ((!is_ap) && (!is_sta))
Eliad Peller94390642011-05-13 11:57:13 +03001730 return;
1731
Eliad Pellerd49524d2012-08-01 18:44:22 +03001732 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1733 return;
1734
Eliad Peller94390642011-05-13 11:57:13 +03001735 ret = wl1271_ps_elp_wakeup(wl);
1736 if (ret < 0)
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001737 return;
Eliad Peller94390642011-05-13 11:57:13 +03001738
Eyal Shapiradae728f2012-02-02 12:03:39 +02001739 if (is_sta) {
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001740 wl1271_configure_wowlan(wl, NULL);
1741
Eyal Shapira11bc97e2012-08-02 07:15:19 +03001742 if ((wl->conf.conn.suspend_wake_up_event ==
1743 wl->conf.conn.wake_up_event) &&
1744 (wl->conf.conn.suspend_listen_interval ==
1745 wl->conf.conn.listen_interval))
1746 goto out_sleep;
1747
Eyal Shapiradae728f2012-02-02 12:03:39 +02001748 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1749 wl->conf.conn.wake_up_event,
1750 wl->conf.conn.listen_interval);
1751
1752 if (ret < 0)
1753 wl1271_error("resume: wake up conditions failed: %d",
1754 ret);
1755
1756 } else if (is_ap) {
1757 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1758 }
Eliad Peller94390642011-05-13 11:57:13 +03001759
Eyal Shapira11bc97e2012-08-02 07:15:19 +03001760out_sleep:
Eliad Peller94390642011-05-13 11:57:13 +03001761 wl1271_ps_elp_sleep(wl);
Eliad Peller94390642011-05-13 11:57:13 +03001762}
1763
Eliad Peller402e48612011-05-13 11:57:09 +03001764static int wl1271_op_suspend(struct ieee80211_hw *hw,
1765 struct cfg80211_wowlan *wow)
1766{
1767 struct wl1271 *wl = hw->priv;
Eliad Peller6e8cd332011-10-10 10:13:13 +02001768 struct wl12xx_vif *wlvif;
Eliad Peller4a859df2011-06-06 12:21:52 +03001769 int ret;
1770
Eliad Peller402e48612011-05-13 11:57:09 +03001771 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001772 WARN_ON(!wow);
Eliad Pellerf44e5862011-05-13 11:57:11 +03001773
Arik Nemtsov96caded2012-06-21 18:10:47 +03001774 /* we want to perform the recovery before suspending */
1775 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1776 wl1271_warning("postponing suspend to perform recovery");
1777 return -EBUSY;
1778 }
1779
Arik Nemtsovb9239b62012-02-28 00:41:33 +02001780 wl1271_tx_flush(wl);
1781
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001782 mutex_lock(&wl->mutex);
Eliad Peller4a859df2011-06-06 12:21:52 +03001783 wl->wow_enabled = true;
Eliad Peller6e8cd332011-10-10 10:13:13 +02001784 wl12xx_for_each_wlvif(wl, wlvif) {
Eyal Shapirab95d7ce2012-03-14 06:32:10 +02001785 ret = wl1271_configure_suspend(wl, wlvif, wow);
Eliad Peller6e8cd332011-10-10 10:13:13 +02001786 if (ret < 0) {
Dan Carpentercd840f62012-04-16 13:57:02 +03001787 mutex_unlock(&wl->mutex);
Eliad Peller6e8cd332011-10-10 10:13:13 +02001788 wl1271_warning("couldn't prepare device to suspend");
1789 return ret;
1790 }
Eliad Pellerf44e5862011-05-13 11:57:11 +03001791 }
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001792 mutex_unlock(&wl->mutex);
Eliad Peller4a859df2011-06-06 12:21:52 +03001793 /* flush any remaining work */
1794 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
Eliad Peller4a859df2011-06-06 12:21:52 +03001795
1796 /*
1797 * disable and re-enable interrupts in order to flush
1798 * the threaded_irq
1799 */
Luciano Coelhodd5512e2012-04-11 11:03:14 +03001800 wlcore_disable_interrupts(wl);
Eliad Peller4a859df2011-06-06 12:21:52 +03001801
1802 /*
1803 * set suspended flag to avoid triggering a new threaded_irq
1804 * work. no need for spinlock as interrupts are disabled.
1805 */
1806 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1807
Luciano Coelhodd5512e2012-04-11 11:03:14 +03001808 wlcore_enable_interrupts(wl);
Eliad Peller4a859df2011-06-06 12:21:52 +03001809 flush_work(&wl->tx_work);
Eliad Peller4a859df2011-06-06 12:21:52 +03001810 flush_delayed_work(&wl->elp_work);
1811
Arik Nemtsov9be86cf2014-02-10 13:47:18 +02001812 /*
1813	 * Cancel the watchdog even if the above tx_flush failed. We will detect
1814 * it on resume anyway.
1815 */
1816 cancel_delayed_work(&wl->tx_watchdog_work);
1817
Eliad Peller402e48612011-05-13 11:57:09 +03001818 return 0;
1819}
1820
1821static int wl1271_op_resume(struct ieee80211_hw *hw)
1822{
1823 struct wl1271 *wl = hw->priv;
Eliad Peller6e8cd332011-10-10 10:13:13 +02001824 struct wl12xx_vif *wlvif;
Eliad Peller4a859df2011-06-06 12:21:52 +03001825 unsigned long flags;
Arik Nemtsovea0a3cf2012-06-21 18:10:49 +03001826 bool run_irq_work = false, pending_recovery;
Arik Nemtsov725b8272012-06-21 18:10:52 +03001827 int ret;
Eliad Peller4a859df2011-06-06 12:21:52 +03001828
Eliad Peller402e48612011-05-13 11:57:09 +03001829 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1830 wl->wow_enabled);
Eliad Peller4a859df2011-06-06 12:21:52 +03001831 WARN_ON(!wl->wow_enabled);
Eliad Pellerf44e5862011-05-13 11:57:11 +03001832
1833 /*
1834 * re-enable irq_work enqueuing, and call irq_work directly if
1835	 * there is pending work.
1836 */
Eliad Peller4a859df2011-06-06 12:21:52 +03001837 spin_lock_irqsave(&wl->wl_lock, flags);
1838 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1839 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1840 run_irq_work = true;
1841 spin_unlock_irqrestore(&wl->wl_lock, flags);
Eliad Pellerf44e5862011-05-13 11:57:11 +03001842
Arik Nemtsov725b8272012-06-21 18:10:52 +03001843 mutex_lock(&wl->mutex);
1844
Arik Nemtsovea0a3cf2012-06-21 18:10:49 +03001845 /* test the recovery flag before calling any SDIO functions */
1846 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1847 &wl->flags);
1848
Eliad Peller4a859df2011-06-06 12:21:52 +03001849 if (run_irq_work) {
1850 wl1271_debug(DEBUG_MAC80211,
1851 "run postponed irq_work directly");
Arik Nemtsovea0a3cf2012-06-21 18:10:49 +03001852
1853 /* don't talk to the HW if recovery is pending */
Arik Nemtsov725b8272012-06-21 18:10:52 +03001854 if (!pending_recovery) {
1855 ret = wlcore_irq_locked(wl);
1856 if (ret)
1857 wl12xx_queue_recovery_work(wl);
1858 }
Arik Nemtsovea0a3cf2012-06-21 18:10:49 +03001859
Luciano Coelhodd5512e2012-04-11 11:03:14 +03001860 wlcore_enable_interrupts(wl);
Eliad Pellerf44e5862011-05-13 11:57:11 +03001861 }
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001862
Arik Nemtsovea0a3cf2012-06-21 18:10:49 +03001863 if (pending_recovery) {
1864 wl1271_warning("queuing forgotten recovery on resume");
1865 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1866 goto out;
1867 }
1868
Eliad Peller6e8cd332011-10-10 10:13:13 +02001869 wl12xx_for_each_wlvif(wl, wlvif) {
1870 wl1271_configure_resume(wl, wlvif);
1871 }
Arik Nemtsovea0a3cf2012-06-21 18:10:49 +03001872
1873out:
Eliad Pellerff91afc2011-06-06 12:21:53 +03001874 wl->wow_enabled = false;
Arik Nemtsov9be86cf2014-02-10 13:47:18 +02001875
1876 /*
1877 * Set a flag to re-init the watchdog on the first Tx after resume.
1878 * That way we avoid possible conditions where Tx-complete interrupts
1879 * fail to arrive and we perform a spurious recovery.
1880 */
1881 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
Eyal Shapirac56dbd52012-03-13 20:03:21 +02001882 mutex_unlock(&wl->mutex);
Eliad Pellerf44e5862011-05-13 11:57:11 +03001883
Eliad Peller402e48612011-05-13 11:57:09 +03001884 return 0;
1885}
Luciano Coelhof634a4e2011-05-18 16:51:26 -04001886#endif
Eliad Peller402e48612011-05-13 11:57:09 +03001887
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03001888static int wl1271_op_start(struct ieee80211_hw *hw)
1889{
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02001890 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1891
1892 /*
1893 * We have to delay the booting of the hardware because
1894 * we need to know the local MAC address before downloading and
1895 * initializing the firmware. The MAC address cannot be changed
1896 * after boot, and without the proper MAC address, the firmware
1897 * will not function properly.
1898 *
1899 * The MAC address is first known when the corresponding interface
1900 * is added. That is where we will initialize the hardware.
1901 */
1902
Eyal Shapirad18da7f2012-01-31 11:57:25 +02001903 return 0;
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02001904}
1905
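/*
 * Full teardown, called with wl->mutex held: mark the state OFF so no
 * new work is performed, disable interrupts, drop the mutex to cancel
 * pending works, then reset TX bookkeeping, power off and clear the
 * per-chip state back to its post-probe defaults.
 */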
Ido Yarivc24ec832012-06-26 21:08:58 +03001906static void wlcore_op_stop_locked(struct wl1271 *wl)
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02001907{
Eliad Pellerbaf62772011-10-10 10:12:52 +02001908 int i;
1909
Ido Yariv4cc53382012-07-24 19:18:49 +03001910 if (wl->state == WLCORE_STATE_OFF) {
Ido Yarivb666bb72012-05-21 01:10:11 +03001911 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1912 &wl->flags))
1913 wlcore_enable_interrupts(wl);
1914
Eliad Peller10c8cd02011-10-10 10:13:06 +02001915 return;
1916 }
Ido Yariv46b0cc92012-01-11 09:42:41 +02001917
Eliad Pellerbaf62772011-10-10 10:12:52 +02001918 /*
1919 * this must be before the cancel_work calls below, so that the work
1920 * functions don't perform further work.
1921 */
Ido Yariv4cc53382012-07-24 19:18:49 +03001922 wl->state = WLCORE_STATE_OFF;
Ido Yarivc24ec832012-06-26 21:08:58 +03001923
1924 /*
1925 * Use the nosync variant to disable interrupts, so the mutex could be
1926 * held while doing so without deadlocking.
1927 */
1928 wlcore_disable_interrupts_nosync(wl);
1929
Eliad Peller10c8cd02011-10-10 10:13:06 +02001930 mutex_unlock(&wl->mutex);
1931
Ido Yarivc24ec832012-06-26 21:08:58 +03001932 wlcore_synchronize_interrupts(wl);
Eliad Peller6dbc5fc2012-07-29 14:37:29 +03001933 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1934 cancel_work_sync(&wl->recovery_work);
Eliad Pellerbaf62772011-10-10 10:12:52 +02001935 wl1271_flush_deferred_work(wl);
1936 cancel_delayed_work_sync(&wl->scan_complete_work);
1937 cancel_work_sync(&wl->netstack_work);
1938 cancel_work_sync(&wl->tx_work);
Eliad Pellerbaf62772011-10-10 10:12:52 +02001939 cancel_delayed_work_sync(&wl->elp_work);
Arik Nemtsov55df5af2012-03-03 22:18:00 +02001940 cancel_delayed_work_sync(&wl->tx_watchdog_work);
Eliad Pellerbaf62772011-10-10 10:12:52 +02001941
1942 /* let's notify MAC80211 about the remaining pending TX frames */
Eliad Pellerbaf62772011-10-10 10:12:52 +02001943 mutex_lock(&wl->mutex);
Arik Nemtsovd935e382012-11-27 08:44:53 +02001944 wl12xx_tx_reset(wl);
Eliad Pellerbaf62772011-10-10 10:12:52 +02001945
1946 wl1271_power_off(wl);
Ido Yarivb666bb72012-05-21 01:10:11 +03001947 /*
1948 * In case a recovery was scheduled, interrupts were disabled to avoid
1949 * an interrupt storm. Now that the power is down, it is safe to
1950 * re-enable interrupts to balance the disable depth
1951 */
1952 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1953 wlcore_enable_interrupts(wl);
Eliad Pellerbaf62772011-10-10 10:12:52 +02001954
1955 wl->band = IEEE80211_BAND_2GHZ;
1956
1957 wl->rx_counter = 0;
1958 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
Arik Nemtsov83d08d32012-05-10 12:13:30 +03001959 wl->channel_type = NL80211_CHAN_NO_HT;
Eliad Pellerbaf62772011-10-10 10:12:52 +02001960 wl->tx_blocks_available = 0;
1961 wl->tx_allocated_blocks = 0;
1962 wl->tx_results_count = 0;
1963 wl->tx_packets_count = 0;
1964 wl->time_offset = 0;
Eliad Pellerbaf62772011-10-10 10:12:52 +02001965 wl->ap_fw_ps_map = 0;
1966 wl->ap_ps_map = 0;
Arik Nemtsov2f18cf72012-06-10 19:10:45 +03001967 wl->sleep_auth = WL1271_PSM_ILLEGAL;
Eliad Pellerbaf62772011-10-10 10:12:52 +02001968 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1969 memset(wl->links_map, 0, sizeof(wl->links_map));
1970 memset(wl->roc_map, 0, sizeof(wl->roc_map));
Eliad Peller978cd3a2012-11-22 18:06:21 +02001971 memset(wl->session_ids, 0, sizeof(wl->session_ids));
Nadim Zubidat02d07272014-02-10 13:47:17 +02001972 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
Eliad Pellerbaf62772011-10-10 10:12:52 +02001973 wl->active_sta_count = 0;
Arik Nemtsov9a100962012-11-28 11:42:42 +02001974 wl->active_link_count = 0;
Eliad Pellerbaf62772011-10-10 10:12:52 +02001975
1976 /* The system link is always allocated */
Arik Nemtsov9ebcb232012-11-27 08:44:59 +02001977 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1978 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
Eliad Pellerbaf62772011-10-10 10:12:52 +02001979 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1980
1981 /*
1982 * this is performed after the cancel_work calls and the associated
1983 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1984 * get executed before all these vars have been reset.
1985 */
1986 wl->flags = 0;
1987
1988 wl->tx_blocks_freed = 0;
1989
1990 for (i = 0; i < NUM_TX_QUEUES; i++) {
1991 wl->tx_pkts_freed[i] = 0;
1992 wl->tx_allocated_pkts[i] = 0;
1993 }
1994
1995 wl1271_debugfs_reset(wl);
1996
Eliad Peller75fb4df2014-02-10 13:47:21 +02001997 kfree(wl->raw_fw_status);
1998 wl->raw_fw_status = NULL;
1999 kfree(wl->fw_status);
2000 wl->fw_status = NULL;
Eliad Pellerbaf62772011-10-10 10:12:52 +02002001 kfree(wl->tx_res_if);
2002 wl->tx_res_if = NULL;
2003 kfree(wl->target_mem_map);
2004 wl->target_mem_map = NULL;
Victor Goldenshtein6b70e7e2012-11-25 18:26:59 +02002005
2006 /*
2007	 * FW channels must be re-calibrated after recovery;
Eliad Peller8d3c1fd2013-09-09 12:24:44 +03002008	 * save the current Reg-Domain channel configuration and clear it.
Victor Goldenshtein6b70e7e2012-11-25 18:26:59 +02002009 */
Eliad Peller8d3c1fd2013-09-09 12:24:44 +03002010 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2011 sizeof(wl->reg_ch_conf_pending));
Victor Goldenshtein6b70e7e2012-11-25 18:26:59 +02002012 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
Ido Yarivc24ec832012-06-26 21:08:58 +03002013}
2014
2015static void wlcore_op_stop(struct ieee80211_hw *hw)
2016{
2017 struct wl1271 *wl = hw->priv;
2018
2019 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2020
2021 mutex_lock(&wl->mutex);
2022
2023 wlcore_op_stop_locked(wl);
Eliad Pellerbaf62772011-10-10 10:12:52 +02002024
2025 mutex_unlock(&wl->mutex);
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02002026}
2027
Eliad Pellerc50a2822012-11-22 18:06:19 +02002028static void wlcore_channel_switch_work(struct work_struct *work)
2029{
2030 struct delayed_work *dwork;
2031 struct wl1271 *wl;
2032 struct ieee80211_vif *vif;
2033 struct wl12xx_vif *wlvif;
2034 int ret;
2035
2036 dwork = container_of(work, struct delayed_work, work);
2037 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2038 wl = wlvif->wl;
2039
2040 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2041
2042 mutex_lock(&wl->mutex);
2043
2044 if (unlikely(wl->state != WLCORE_STATE_ON))
2045 goto out;
2046
2047 /* check the channel switch is still ongoing */
2048 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2049 goto out;
2050
2051 vif = wl12xx_wlvif_to_vif(wlvif);
2052 ieee80211_chswitch_done(vif, false);
2053
2054 ret = wl1271_ps_elp_wakeup(wl);
2055 if (ret < 0)
2056 goto out;
2057
2058 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2059
2060 wl1271_ps_elp_sleep(wl);
2061out:
2062 mutex_unlock(&wl->mutex);
2063}
2064
2065static void wlcore_connection_loss_work(struct work_struct *work)
2066{
2067 struct delayed_work *dwork;
2068 struct wl1271 *wl;
2069 struct ieee80211_vif *vif;
2070 struct wl12xx_vif *wlvif;
2071
2072 dwork = container_of(work, struct delayed_work, work);
2073 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2074 wl = wlvif->wl;
2075
2076 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2077
2078 mutex_lock(&wl->mutex);
2079
2080 if (unlikely(wl->state != WLCORE_STATE_ON))
2081 goto out;
2082
2083 /* Call mac80211 connection loss */
2084 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2085 goto out;
2086
2087 vif = wl12xx_wlvif_to_vif(wlvif);
2088 ieee80211_connection_loss(vif);
2089out:
2090 mutex_unlock(&wl->mutex);
2091}
2092
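/*
 * Delayed work scheduled when an auth reply is sent to a connecting
 * station; unless another auth reply pushed the deadline forward,
 * drop the ROC that was kept open for that station.
 */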
Arik Nemtsov187e52c2013-09-17 18:41:20 +03002093static void wlcore_pending_auth_complete_work(struct work_struct *work)
2094{
2095 struct delayed_work *dwork;
2096 struct wl1271 *wl;
2097 struct wl12xx_vif *wlvif;
2098 unsigned long time_spare;
2099 int ret;
2100
2101 dwork = container_of(work, struct delayed_work, work);
2102 wlvif = container_of(dwork, struct wl12xx_vif,
2103 pending_auth_complete_work);
2104 wl = wlvif->wl;
2105
2106 mutex_lock(&wl->mutex);
2107
2108 if (unlikely(wl->state != WLCORE_STATE_ON))
2109 goto out;
2110
2111 /*
2112 * Make sure a second really passed since the last auth reply. Maybe
2113 * a second auth reply arrived while we were stuck on the mutex.
2114 * Check for a little less than the timeout to protect from scheduler
2115 * irregularities.
2116 */
2117 time_spare = jiffies +
2118 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2119 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2120 goto out;
2121
2122 ret = wl1271_ps_elp_wakeup(wl);
2123 if (ret < 0)
2124 goto out;
2125
2126 /* cancel the ROC if active */
2127 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2128
2129 wl1271_ps_elp_sleep(wl);
2130out:
2131 mutex_unlock(&wl->mutex);
2132}
2133
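/*
 * FW rate policies are a limited resource, tracked in a bitmap of
 * WL12XX_MAX_RATE_POLICIES entries; hand out the first free index.
 */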
Eliad Pellere5a359f2011-10-10 10:13:15 +02002134static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2135{
2136 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2137 WL12XX_MAX_RATE_POLICIES);
2138 if (policy >= WL12XX_MAX_RATE_POLICIES)
2139 return -EBUSY;
2140
2141 __set_bit(policy, wl->rate_policies_map);
2142 *idx = policy;
2143 return 0;
2144}
2145
2146static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2147{
2148 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2149 return;
2150
2151 __clear_bit(*idx, wl->rate_policies_map);
2152 *idx = WL12XX_MAX_RATE_POLICIES;
2153}
2154
Eliad Peller001e39a2012-08-16 13:52:47 +03002155static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2156{
2157 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2158 WLCORE_MAX_KLV_TEMPLATES);
2159 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2160 return -EBUSY;
2161
2162 __set_bit(policy, wl->klv_templates_map);
2163 *idx = policy;
2164 return 0;
2165}
2166
2167static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2168{
2169 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2170 return;
2171
2172 __clear_bit(*idx, wl->klv_templates_map);
2173 *idx = WLCORE_MAX_KLV_TEMPLATES;
2174}
2175
Eliad Peller536129c2011-10-05 11:55:45 +02002176static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002177{
Eliad Peller536129c2011-10-05 11:55:45 +02002178 switch (wlvif->bss_type) {
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002179 case BSS_TYPE_AP_BSS:
Eliad Pellerfb0e7072011-10-05 11:55:47 +02002180 if (wlvif->p2p)
Eliad Peller045c7452011-08-28 15:23:01 +03002181 return WL1271_ROLE_P2P_GO;
2182 else
2183 return WL1271_ROLE_AP;
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002184
2185 case BSS_TYPE_STA_BSS:
Eliad Pellerfb0e7072011-10-05 11:55:47 +02002186 if (wlvif->p2p)
Eliad Peller045c7452011-08-28 15:23:01 +03002187 return WL1271_ROLE_P2P_CL;
2188 else
2189 return WL1271_ROLE_STA;
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002190
Eliad Peller227e81e2011-08-14 13:17:26 +03002191 case BSS_TYPE_IBSS:
2192 return WL1271_ROLE_IBSS;
2193
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002194 default:
Eliad Peller536129c2011-10-05 11:55:45 +02002195 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002196 }
2197 return WL12XX_INVALID_ROLE_TYPE;
2198}
2199
Eliad Peller83587502011-10-10 10:12:53 +02002200static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
Eliad Peller87fbcb02011-10-05 11:55:41 +02002201{
Eliad Pellere936bbe2011-10-05 11:55:56 +02002202 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Eliad Pellere5a359f2011-10-10 10:13:15 +02002203 int i;
Eliad Pellere936bbe2011-10-05 11:55:56 +02002204
Eliad Peller48e93e42011-10-10 10:12:58 +02002205 /* clear everything but the persistent data */
2206 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
Eliad Pellere936bbe2011-10-05 11:55:56 +02002207
2208 switch (ieee80211_vif_type_p2p(vif)) {
2209 case NL80211_IFTYPE_P2P_CLIENT:
2210 wlvif->p2p = 1;
2211 /* fall-through */
2212 case NL80211_IFTYPE_STATION:
2213 wlvif->bss_type = BSS_TYPE_STA_BSS;
2214 break;
2215 case NL80211_IFTYPE_ADHOC:
2216 wlvif->bss_type = BSS_TYPE_IBSS;
2217 break;
2218 case NL80211_IFTYPE_P2P_GO:
2219 wlvif->p2p = 1;
2220 /* fall-through */
2221 case NL80211_IFTYPE_AP:
2222 wlvif->bss_type = BSS_TYPE_AP_BSS;
2223 break;
2224 default:
2225 wlvif->bss_type = MAX_BSS_TYPE;
2226 return -EOPNOTSUPP;
2227 }
2228
Eliad Peller0603d892011-10-05 11:55:51 +02002229 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
Eliad Peller7edebf52011-10-05 11:55:52 +02002230 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
Eliad Pellerafaf8bd2011-10-05 11:55:57 +02002231 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
Eliad Pellera8ab39a2011-10-05 11:55:54 +02002232
Eliad Pellere936bbe2011-10-05 11:55:56 +02002233 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2234 wlvif->bss_type == BSS_TYPE_IBSS) {
2235 /* init sta/ibss data */
2236 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
Eliad Pellere5a359f2011-10-10 10:13:15 +02002237 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2238 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2239 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
Eliad Peller001e39a2012-08-16 13:52:47 +03002240 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
Luciano Coelho15e05bc2012-05-10 12:14:05 +03002241 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2242 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2243 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
Eliad Pellere936bbe2011-10-05 11:55:56 +02002244 } else {
2245 /* init ap data */
2246 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2247 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
Eliad Pellere5a359f2011-10-10 10:13:15 +02002248 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2249 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2250 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2251 wl12xx_allocate_rate_policy(wl,
2252 &wlvif->ap.ucast_rate_idx[i]);
Eliad Peller42ec1f82012-11-20 13:20:08 +02002253 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
Luciano Coelho15e05bc2012-05-10 12:14:05 +03002254 /*
2255 * TODO: check if basic_rate shouldn't be
2256 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2257 * instead (the same thing for STA above).
2258 */
Eliad Peller42ec1f82012-11-20 13:20:08 +02002259 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
Luciano Coelho15e05bc2012-05-10 12:14:05 +03002260 /* TODO: this seems to be used only for STA, check it */
Eliad Peller42ec1f82012-11-20 13:20:08 +02002261 wlvif->rate_set = CONF_TX_ENABLED_RATES;
Eliad Pellere936bbe2011-10-05 11:55:56 +02002262 }
Eliad Pellera8ab39a2011-10-05 11:55:54 +02002263
Eliad Peller83587502011-10-10 10:12:53 +02002264 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2265 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
Eliad Peller6a899792011-10-05 11:55:58 +02002266 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2267
Eliad Peller1b92f152011-10-10 10:13:09 +02002268 /*
2269 * mac80211 configures some values globally, while we treat them
2270	 * per-interface. Thus, on init, we have to copy them from wl.
2271 */
2272 wlvif->band = wl->band;
Eliad Peller61f845f2011-10-10 10:13:10 +02002273 wlvif->channel = wl->channel;
Eliad Peller6bd65022011-10-10 10:13:11 +02002274 wlvif->power_level = wl->power_level;
Arik Nemtsov83d08d32012-05-10 12:13:30 +03002275 wlvif->channel_type = wl->channel_type;
Eliad Peller1b92f152011-10-10 10:13:09 +02002276
Eliad Peller9eb599e2011-10-10 10:12:59 +02002277 INIT_WORK(&wlvif->rx_streaming_enable_work,
2278 wl1271_rx_streaming_enable_work);
2279 INIT_WORK(&wlvif->rx_streaming_disable_work,
2280 wl1271_rx_streaming_disable_work);
Eliad Pellerc50a2822012-11-22 18:06:19 +02002281 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2282 wlcore_channel_switch_work);
2283 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2284 wlcore_connection_loss_work);
Arik Nemtsov187e52c2013-09-17 18:41:20 +03002285 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2286 wlcore_pending_auth_complete_work);
Eliad Peller87627212011-10-10 10:12:54 +02002287 INIT_LIST_HEAD(&wlvif->list);
Eliad Peller252efa42011-10-05 11:56:00 +02002288
Eliad Peller9eb599e2011-10-10 10:12:59 +02002289 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2290 (unsigned long) wlvif);
Eliad Pellere936bbe2011-10-05 11:55:56 +02002291 return 0;
Eliad Peller87fbcb02011-10-05 11:55:41 +02002292}
2293
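/*
 * Boot the firmware with up to WL1271_BOOT_RETRIES attempts: power on,
 * run the chip-specific boot and HW init. On an init failure the mutex
 * is dropped so pending IRQ work can drain before powering off and
 * retrying. On success the state moves to WLCORE_STATE_ON and the
 * wiphy hw/fw version info is updated.
 */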
Luciano Coelho5dc283f2013-01-30 10:53:14 +02002294static int wl12xx_init_fw(struct wl1271 *wl)
Eliad Peller1d095472011-10-10 10:12:49 +02002295{
2296 int retries = WL1271_BOOT_RETRIES;
2297 bool booted = false;
2298 struct wiphy *wiphy = wl->hw->wiphy;
2299 int ret;
2300
2301 while (retries) {
2302 retries--;
Eliad Peller3fcdab72012-02-06 12:47:54 +02002303 ret = wl12xx_chip_wakeup(wl, false);
Eliad Peller1d095472011-10-10 10:12:49 +02002304 if (ret < 0)
2305 goto power_off;
2306
Luciano Coelhodd5512e2012-04-11 11:03:14 +03002307 ret = wl->ops->boot(wl);
Eliad Peller1d095472011-10-10 10:12:49 +02002308 if (ret < 0)
2309 goto power_off;
2310
2311 ret = wl1271_hw_init(wl);
2312 if (ret < 0)
2313 goto irq_disable;
2314
2315 booted = true;
2316 break;
2317
2318irq_disable:
2319 mutex_unlock(&wl->mutex);
2320 /* Unlocking the mutex in the middle of handling is
2321 inherently unsafe. In this case we deem it safe to do,
2322 because we need to let any possibly pending IRQ out of
Ido Yariv4cc53382012-07-24 19:18:49 +03002323 the system (and while we are WLCORE_STATE_OFF the IRQ
Eliad Peller1d095472011-10-10 10:12:49 +02002324 work function will not do anything.) Also, any other
2325 possible concurrent operations will fail due to the
2326 current state, hence the wl1271 struct should be safe. */
Luciano Coelhodd5512e2012-04-11 11:03:14 +03002327 wlcore_disable_interrupts(wl);
Eliad Peller1d095472011-10-10 10:12:49 +02002328 wl1271_flush_deferred_work(wl);
2329 cancel_work_sync(&wl->netstack_work);
2330 mutex_lock(&wl->mutex);
2331power_off:
2332 wl1271_power_off(wl);
2333 }
2334
2335 if (!booted) {
2336 wl1271_error("firmware boot failed despite %d retries",
2337 WL1271_BOOT_RETRIES);
2338 goto out;
2339 }
2340
2341 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2342
2343 /* update hw/fw version info in wiphy struct */
2344 wiphy->hw_version = wl->chip.id;
2345 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2346 sizeof(wiphy->fw_version));
2347
2348 /*
2349 * Now we know if 11a is supported (info from the NVS), so disable
2350 * 11a channels if not supported
2351 */
2352 if (!wl->enable_11a)
2353 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2354
2355 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2356 wl->enable_11a ? "" : "not ");
2357
Ido Yariv4cc53382012-07-24 19:18:49 +03002358 wl->state = WLCORE_STATE_ON;
Eliad Peller1d095472011-10-10 10:12:49 +02002359out:
Luciano Coelho5dc283f2013-01-30 10:53:14 +02002360 return ret;
Eliad Peller1d095472011-10-10 10:12:49 +02002361}
2362
Eliad Peller92e712d2011-12-18 20:25:43 +02002363static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2364{
2365 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2366}
2367
Eliad Peller4549d092012-02-06 13:07:52 +02002368/*
2369 * Check whether a fw switch (i.e. moving from one loaded
2370 * fw to another) is needed. This function is also responsible
2371 * for updating wl->last_vif_count, so it must be called before
2372 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2373 * will be used).
2374 */
2375static bool wl12xx_need_fw_change(struct wl1271 *wl,
2376 struct vif_counter_data vif_counter_data,
2377 bool add)
2378{
2379 enum wl12xx_fw_type current_fw = wl->fw_type;
2380 u8 vif_count = vif_counter_data.counter;
2381
2382 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2383 return false;
2384
2385 /* increase the vif count if this is a new vif */
2386 if (add && !vif_counter_data.cur_vif_running)
2387 vif_count++;
2388
2389 wl->last_vif_count = vif_count;
2390
2391 /* no need for fw change if the device is OFF */
Ido Yariv4cc53382012-07-24 19:18:49 +03002392 if (wl->state == WLCORE_STATE_OFF)
Eliad Peller4549d092012-02-06 13:07:52 +02002393 return false;
2394
Eliad Peller9b1a0a72012-07-25 14:22:21 +03002395 /* no need for fw change if a single fw is used */
2396 if (!wl->mr_fw_name)
2397 return false;
2398
Eliad Peller4549d092012-02-06 13:07:52 +02002399 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2400 return true;
2401 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2402 return true;
2403
2404 return false;
2405}
2406
Eliad Peller3dee4392012-02-06 12:47:56 +02002407/*
2408 * Enter "forced psm". Make sure the sta is in psm against the ap,
2409 * to make the fw switch a bit more disconnection-persistent.
2410 */
2411static void wl12xx_force_active_psm(struct wl1271 *wl)
2412{
2413 struct wl12xx_vif *wlvif;
2414
2415 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2416 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2417 }
2418}
2419
Arik Nemtsov1c33db72012-11-30 00:48:03 +02002420struct wlcore_hw_queue_iter_data {
2421 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2422 /* current vif */
2423 struct ieee80211_vif *vif;
2424 /* is the current vif among those iterated */
2425 bool cur_running;
2426};
2427
2428static void wlcore_hw_queue_iter(void *data, u8 *mac,
2429 struct ieee80211_vif *vif)
2430{
2431 struct wlcore_hw_queue_iter_data *iter_data = data;
2432
2433 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2434 return;
2435
2436 if (iter_data->cur_running || vif == iter_data->vif) {
2437 iter_data->cur_running = true;
2438 return;
2439 }
2440
2441 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2442}
2443
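/*
 * Each vif owns a contiguous block of NUM_TX_QUEUES mac80211 hw
 * queues. Iterate over the active vifs to find a free block (reusing
 * the one already registered on resume/recovery) and reserve one of
 * the trailing per-vif slots for the AP's CAB queue.
 */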
2444static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2445 struct wl12xx_vif *wlvif)
2446{
2447 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2448 struct wlcore_hw_queue_iter_data iter_data = {};
2449 int i, q_base;
2450
2451 iter_data.vif = vif;
2452
2453 /* mark all bits taken by active interfaces */
2454 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2455 IEEE80211_IFACE_ITER_RESUME_ALL,
2456 wlcore_hw_queue_iter, &iter_data);
2457
2458 /* the current vif is already running in mac80211 (resume/recovery) */
2459 if (iter_data.cur_running) {
2460 wlvif->hw_queue_base = vif->hw_queue[0];
2461 wl1271_debug(DEBUG_MAC80211,
2462 "using pre-allocated hw queue base %d",
2463 wlvif->hw_queue_base);
2464
2465		/* the interface might have changed type */
2466 goto adjust_cab_queue;
2467 }
2468
2469 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2470 WLCORE_NUM_MAC_ADDRESSES);
2471 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2472 return -EBUSY;
2473
2474 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2475 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2476 wlvif->hw_queue_base);
2477
2478 for (i = 0; i < NUM_TX_QUEUES; i++) {
2479 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2480 /* register hw queues in mac80211 */
2481 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2482 }
2483
2484adjust_cab_queue:
2485 /* the last places are reserved for cab queues per interface */
2486 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2487 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2488 wlvif->hw_queue_base / NUM_TX_QUEUES;
2489 else
2490 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2491
2492 return 0;
2493}
2494
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02002495static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2496 struct ieee80211_vif *vif)
2497{
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002498 struct wl1271 *wl = hw->priv;
Eliad Peller536129c2011-10-05 11:55:45 +02002499 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Eliad Peller4549d092012-02-06 13:07:52 +02002500 struct vif_counter_data vif_count;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002501 int ret = 0;
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002502 u8 role_type;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002503
Yair Shapiradd491ff2013-09-17 18:41:21 +03002504 if (wl->plt) {
2505 wl1271_error("Adding Interface not allowed while in PLT mode");
2506 return -EBUSY;
2507 }
2508
Johannes Bergea086352012-01-19 09:29:58 +01002509 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2510 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
Johannes Bergc1288b12012-01-19 09:29:57 +01002511
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02002512 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
Eliad Peller045c7452011-08-28 15:23:01 +03002513 ieee80211_vif_type_p2p(vif), vif->addr);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002514
Eliad Peller4549d092012-02-06 13:07:52 +02002515 wl12xx_get_vif_count(hw, vif, &vif_count);
2516
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002517 mutex_lock(&wl->mutex);
Eliad Pellerf750c822011-10-10 10:13:16 +02002518 ret = wl1271_ps_elp_wakeup(wl);
2519 if (ret < 0)
2520 goto out_unlock;
2521
Juuso Oikarinen13026de2011-03-29 16:43:50 +03002522 /*
2523	 * in some rare corner-case HW recovery scenarios it's possible to
2524	 * get here before __wl1271_op_remove_interface has completed, so
2525 * opt out if that is the case.
2526 */
Eliad Peller10c8cd02011-10-10 10:13:06 +02002527 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2528 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
Juuso Oikarinen13026de2011-03-29 16:43:50 +03002529 ret = -EBUSY;
2530 goto out;
2531 }
2532
Eliad Peller3fcdab72012-02-06 12:47:54 +02002533
Eliad Peller83587502011-10-10 10:12:53 +02002534 ret = wl12xx_init_vif_data(wl, vif);
Eliad Pellere936bbe2011-10-05 11:55:56 +02002535 if (ret < 0)
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02002536 goto out;
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02002537
Eliad Peller252efa42011-10-05 11:56:00 +02002538 wlvif->wl = wl;
Eliad Peller536129c2011-10-05 11:55:45 +02002539 role_type = wl12xx_get_role_type(wl, wlvif);
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002540 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2541 ret = -EINVAL;
2542 goto out;
2543 }
Eliad Peller1d095472011-10-10 10:12:49 +02002544
Arik Nemtsov1c33db72012-11-30 00:48:03 +02002545 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2546 if (ret < 0)
2547 goto out;
2548
Eliad Peller4549d092012-02-06 13:07:52 +02002549 if (wl12xx_need_fw_change(wl, vif_count, true)) {
Eliad Peller3dee4392012-02-06 12:47:56 +02002550 wl12xx_force_active_psm(wl);
Eliad Pellere9ba7152012-03-04 10:55:54 +02002551 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
Eliad Peller4549d092012-02-06 13:07:52 +02002552 mutex_unlock(&wl->mutex);
2553 wl1271_recovery_work(&wl->recovery_work);
2554 return 0;
2555 }
2556
Eliad Peller784f6942011-10-05 11:55:39 +02002557 /*
Eliad Peller1d095472011-10-10 10:12:49 +02002558	 * TODO: once the nvs issue is solved, move this block
2559	 * to start(), and make sure the driver is ON here.
Eliad Peller784f6942011-10-05 11:55:39 +02002560 */
Ido Yariv4cc53382012-07-24 19:18:49 +03002561 if (wl->state == WLCORE_STATE_OFF) {
Eliad Peller1d095472011-10-10 10:12:49 +02002562 /*
2563 * we still need this in order to configure the fw
2564 * while uploading the nvs
2565 */
Luciano Coelho5e037e72011-12-23 09:32:17 +02002566 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002567
Luciano Coelho5dc283f2013-01-30 10:53:14 +02002568 ret = wl12xx_init_fw(wl);
2569 if (ret < 0)
Eliad Peller1d095472011-10-10 10:12:49 +02002570 goto out;
Eliad Peller1d095472011-10-10 10:12:49 +02002571 }
Eliad Peller04e80792011-08-14 13:17:09 +03002572
Eliad Peller1d095472011-10-10 10:12:49 +02002573 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2574 role_type, &wlvif->role_id);
2575 if (ret < 0)
Eliad Peller71125ab2010-10-28 21:46:43 +02002576 goto out;
Eliad Peller1d095472011-10-10 10:12:49 +02002577
2578 ret = wl1271_init_vif_specific(wl, vif);
2579 if (ret < 0)
2580 goto out;
Eliad Peller71125ab2010-10-28 21:46:43 +02002581
Eliad Peller87627212011-10-10 10:12:54 +02002582 list_add(&wlvif->list, &wl->wlvif_list);
Eliad Peller10c8cd02011-10-10 10:13:06 +02002583 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
Eliad Pellera4e41302011-10-11 11:49:15 +02002584
2585 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2586 wl->ap_count++;
2587 else
2588 wl->sta_count++;
Juuso Oikarineneb5b28d2009-10-13 12:47:45 +03002589out:
Eliad Pellerf750c822011-10-10 10:13:16 +02002590 wl1271_ps_elp_sleep(wl);
2591out_unlock:
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002592 mutex_unlock(&wl->mutex);
2593
2594 return ret;
2595}
2596
Arik Nemtsov7dece1c2011-04-18 14:15:28 +03002597static void __wl1271_op_remove_interface(struct wl1271 *wl,
Eliad Peller536129c2011-10-05 11:55:45 +02002598 struct ieee80211_vif *vif,
Arik Nemtsov7dece1c2011-04-18 14:15:28 +03002599 bool reset_tx_queues)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002600{
Eliad Peller536129c2011-10-05 11:55:45 +02002601 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Eliad Pellere5a359f2011-10-10 10:13:15 +02002602 int i, ret;
Arik Nemtsov2f18cf72012-06-10 19:10:45 +03002603 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002604
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02002605 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002606
Eliad Peller10c8cd02011-10-10 10:13:06 +02002607 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2608 return;
2609
Juuso Oikarinen13026de2011-03-29 16:43:50 +03002610 /* because of hardware recovery, we may get here twice */
Ido Yariv4cc53382012-07-24 19:18:49 +03002611 if (wl->state == WLCORE_STATE_OFF)
Juuso Oikarinen13026de2011-03-29 16:43:50 +03002612 return;
2613
Juuso Oikarinen1b72aec2010-03-18 12:26:39 +02002614 wl1271_info("down");
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002615
Eliad Pellerbaf62772011-10-10 10:12:52 +02002616 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
Eliad Pellerc50a2822012-11-22 18:06:19 +02002617 wl->scan_wlvif == wlvif) {
Arik Nemtsov55df5af2012-03-03 22:18:00 +02002618 /*
2619 * Rearm the tx watchdog just before idling scan. This
2620 * prevents just-finished scans from triggering the watchdog
2621 */
2622 wl12xx_rearm_tx_watchdog_locked(wl);
2623
Luciano Coelho08688d62010-07-08 17:50:07 +03002624 wl->scan.state = WL1271_SCAN_STATE_IDLE;
Luciano Coelho4a31c112011-03-21 23:16:14 +02002625 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
Eliad Pellerc50a2822012-11-22 18:06:19 +02002626 wl->scan_wlvif = NULL;
Juuso Oikarinenb739a422010-10-26 13:24:38 +02002627 wl->scan.req = NULL;
Juuso Oikarinen76a029f2010-07-29 04:54:45 +03002628 ieee80211_scan_completed(wl->hw, true);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002629 }
2630
Barak Bercovitz5a441f52014-02-10 13:47:27 +02002631 if (wl->sched_vif == wlvif)
Eliad Peller10199752012-11-22 18:06:23 +02002632 wl->sched_vif = NULL;
Eliad Peller10199752012-11-22 18:06:23 +02002633
Arik Nemtsov5d979f32012-11-27 08:44:48 +02002634 if (wl->roc_vif == vif) {
2635 wl->roc_vif = NULL;
2636 ieee80211_remain_on_channel_expired(wl->hw);
2637 }
2638
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002639 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2640 /* disable active roles */
2641 ret = wl1271_ps_elp_wakeup(wl);
2642 if (ret < 0)
2643 goto deinit;
2644
Eliad Pellerb890f4c2011-12-18 20:25:44 +02002645 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2646 wlvif->bss_type == BSS_TYPE_IBSS) {
2647 if (wl12xx_dev_role_started(wlvif))
2648 wl12xx_stop_dev(wl, wlvif);
Eliad Peller04e80792011-08-14 13:17:09 +03002649 }
2650
Eliad Peller0603d892011-10-05 11:55:51 +02002651 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002652 if (ret < 0)
2653 goto deinit;
2654
2655 wl1271_ps_elp_sleep(wl);
2656 }
2657deinit:
Arik Nemtsov5a996102013-03-12 17:19:43 +02002658 wl12xx_tx_reset_wlvif(wl, wlvif);
2659
Arik Nemtsove51ae9b2011-08-14 13:17:21 +03002660 /* clear all hlids (except system_hlid) */
Eliad Pellerafaf8bd2011-10-05 11:55:57 +02002661 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
Eliad Pellere5a359f2011-10-10 10:13:15 +02002662
2663 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2664 wlvif->bss_type == BSS_TYPE_IBSS) {
2665 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2666 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2667 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2668 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
Eliad Peller001e39a2012-08-16 13:52:47 +03002669 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
Eliad Pellere5a359f2011-10-10 10:13:15 +02002670 } else {
2671 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2672 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2673 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2674 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2675 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2676 wl12xx_free_rate_policy(wl,
2677 &wlvif->ap.ucast_rate_idx[i]);
Eliad Peller830be7e2012-03-19 11:32:55 +02002678 wl1271_free_ap_keys(wl, wlvif);
Eliad Pellere5a359f2011-10-10 10:13:15 +02002679 }
Eliad Pellerb78b47e2011-08-14 13:17:08 +03002680
Eyal Shapira3eba4a02012-03-19 12:06:27 +02002681 dev_kfree_skb(wlvif->probereq);
2682 wlvif->probereq = NULL;
Eliad Pellere4120df2011-10-10 10:13:17 +02002683 if (wl->last_wlvif == wlvif)
2684 wl->last_wlvif = NULL;
Eliad Peller87627212011-10-10 10:12:54 +02002685 list_del(&wlvif->list);
Eliad Pellerc7ffb902011-10-05 11:56:05 +02002686 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
Eliad Peller0603d892011-10-05 11:55:51 +02002687 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
Eliad Peller7edebf52011-10-05 11:55:52 +02002688 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
Luciano Coelhod6e19d12009-10-12 15:08:43 +03002689
Arik Nemtsov2f18cf72012-06-10 19:10:45 +03002690 if (is_ap)
Eliad Pellera4e41302011-10-11 11:49:15 +02002691 wl->ap_count--;
2692 else
2693 wl->sta_count--;
2694
Arik Nemtsov42066f92012-07-10 10:45:01 +03002695 /*
2696	 * Last AP is gone, but stations remain. Configure sleep auth according to STA.
2697	 * Don't do this on unintended recovery.
2698 */
2699 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2700 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2701 goto unlock;
2702
Eliad Peller71e996b2013-09-09 12:24:34 +03002703 if (wl->ap_count == 0 && is_ap) {
2704 /* mask ap events */
2705 wl->event_mask &= ~wl->ap_event_mask;
2706 wl1271_event_unmask(wl);
2707 }
2708
Arik Nemtsov2f18cf72012-06-10 19:10:45 +03002709 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2710 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2711 /* Configure for power according to debugfs */
2712 if (sta_auth != WL1271_PSM_ILLEGAL)
2713 wl1271_acx_sleep_auth(wl, sta_auth);
Arik Nemtsov2f18cf72012-06-10 19:10:45 +03002714 /* Configure for ELP power saving */
2715 else
2716 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2717 }
2718
Arik Nemtsov42066f92012-07-10 10:45:01 +03002719unlock:
Eliad Pellerbaf62772011-10-10 10:12:52 +02002720 mutex_unlock(&wl->mutex);
Eyal Shapirad6bf9ad2012-01-31 11:57:20 +02002721
Eliad Peller9eb599e2011-10-10 10:12:59 +02002722 del_timer_sync(&wlvif->rx_streaming_timer);
2723 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2724 cancel_work_sync(&wlvif->rx_streaming_disable_work);
Eliad Pellerc50a2822012-11-22 18:06:19 +02002725 cancel_delayed_work_sync(&wlvif->connection_loss_work);
Arik Nemtsovc8384782013-05-12 12:35:29 +03002726 cancel_delayed_work_sync(&wlvif->channel_switch_work);
Arik Nemtsov187e52c2013-09-17 18:41:20 +03002727 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
Eliad Pellerf4df1bd2011-08-14 13:17:15 +03002728
Eliad Pellerbaf62772011-10-10 10:12:52 +02002729 mutex_lock(&wl->mutex);
Juuso Oikarinen52a2a372010-09-21 06:23:30 +02002730}
Juuso Oikarinenbd9dc492010-04-09 11:07:26 +03002731
Juuso Oikarinen52a2a372010-09-21 06:23:30 +02002732static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2733 struct ieee80211_vif *vif)
2734{
2735 struct wl1271 *wl = hw->priv;
Eliad Peller10c8cd02011-10-10 10:13:06 +02002736 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Eliad Peller6e8cd332011-10-10 10:13:13 +02002737 struct wl12xx_vif *iter;
Eliad Peller4549d092012-02-06 13:07:52 +02002738 struct vif_counter_data vif_count;
Juuso Oikarinen52a2a372010-09-21 06:23:30 +02002739
Eliad Peller4549d092012-02-06 13:07:52 +02002740 wl12xx_get_vif_count(hw, vif, &vif_count);
Juuso Oikarinen52a2a372010-09-21 06:23:30 +02002741 mutex_lock(&wl->mutex);
Eliad Peller10c8cd02011-10-10 10:13:06 +02002742
Ido Yariv4cc53382012-07-24 19:18:49 +03002743 if (wl->state == WLCORE_STATE_OFF ||
Eliad Peller10c8cd02011-10-10 10:13:06 +02002744 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2745 goto out;
2746
Juuso Oikarinen67353292010-11-18 15:19:02 +02002747 /*
2748 * wl->vif can be null here if someone shuts down the interface
2749 * just when hardware recovery has been started.
2750 */
Eliad Peller6e8cd332011-10-10 10:13:13 +02002751 wl12xx_for_each_wlvif(wl, iter) {
2752 if (iter != wlvif)
2753 continue;
2754
Eliad Peller536129c2011-10-05 11:55:45 +02002755 __wl1271_op_remove_interface(wl, vif, true);
Eliad Peller6e8cd332011-10-10 10:13:13 +02002756 break;
Juuso Oikarinen67353292010-11-18 15:19:02 +02002757 }
Eliad Peller6e8cd332011-10-10 10:13:13 +02002758 WARN_ON(iter != wlvif);
Eliad Peller4549d092012-02-06 13:07:52 +02002759 if (wl12xx_need_fw_change(wl, vif_count, false)) {
Eliad Peller3dee4392012-02-06 12:47:56 +02002760 wl12xx_force_active_psm(wl);
Eliad Pellere9ba7152012-03-04 10:55:54 +02002761 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
Eliad Peller4549d092012-02-06 13:07:52 +02002762 wl12xx_queue_recovery_work(wl);
Eliad Peller4549d092012-02-06 13:07:52 +02002763 }
Eliad Peller10c8cd02011-10-10 10:13:06 +02002764out:
Juuso Oikarinen67353292010-11-18 15:19:02 +02002765 mutex_unlock(&wl->mutex);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03002766}
2767
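/*
 * An interface type change is implemented as a remove + add of the same
 * vif with the updated type and p2p flag; WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
 * is set for the duration of the sequence.
 */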
Eliad Pellerc0fad1b2011-12-19 12:00:03 +02002768static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2769 struct ieee80211_vif *vif,
2770 enum nl80211_iftype new_type, bool p2p)
2771{
Eliad Peller4549d092012-02-06 13:07:52 +02002772 struct wl1271 *wl = hw->priv;
2773 int ret;
2774
2775 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
Eliad Pellerc0fad1b2011-12-19 12:00:03 +02002776 wl1271_op_remove_interface(hw, vif);
2777
Eliad Peller249e9692012-03-04 10:55:50 +02002778 vif->type = new_type;
Eliad Pellerc0fad1b2011-12-19 12:00:03 +02002779 vif->p2p = p2p;
Eliad Peller4549d092012-02-06 13:07:52 +02002780 ret = wl1271_op_add_interface(hw, vif);
2781
2782 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2783 return ret;
Eliad Pellerc0fad1b2011-12-19 12:00:03 +02002784}
2785
Eliad Peller3230f352012-11-20 13:20:01 +02002786static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002787{
2788 int ret;
Eliad Peller536129c2011-10-05 11:55:45 +02002789 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002790
Juuso Oikarinen69e54342010-05-07 11:39:00 +03002791 /*
2792	 * One of the side effects of the JOIN command is that it clears
2793 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2794 * to a WPA/WPA2 access point will therefore kill the data-path.
Ohad Ben-Cohen8bf69aa2011-03-30 19:18:31 +02002795 * Currently the only valid scenario for JOIN during association
2796 * is on roaming, in which case we will also be given new keys.
2797 * Keep the below message for now, unless it starts bothering
2798 * users who really like to roam a lot :)
Juuso Oikarinen69e54342010-05-07 11:39:00 +03002799 */
Eliad Pellerba8447f2011-10-10 10:13:00 +02002800 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
Juuso Oikarinen69e54342010-05-07 11:39:00 +03002801 wl1271_info("JOIN while associated.");
2802
Eliad Peller5ec8a442012-02-02 12:22:09 +02002803 /* clear encryption type */
2804 wlvif->encryption_type = KEY_NONE;
2805
Eliad Peller227e81e2011-08-14 13:17:26 +03002806 if (is_ibss)
Eliad Peller87fbcb02011-10-05 11:55:41 +02002807 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
Eliad Peller18eab432012-11-20 13:20:02 +02002808 else {
2809 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2810	 * TODO: this is an ugly workaround for a wl12xx fw
2811 * TODO: this is an ugly workaround for wl12xx fw
2812 * bug - we are not able to tx/rx after the first
2813 * start_sta, so make dummy start+stop calls,
2814 * and then call start_sta again.
2815 * this should be fixed in the fw.
2816 */
2817 wl12xx_cmd_role_start_sta(wl, wlvif);
2818 wl12xx_cmd_role_stop_sta(wl, wlvif);
2819 }
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002820
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002821 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
Eliad Peller18eab432012-11-20 13:20:02 +02002822 }
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002823
Eliad Peller3230f352012-11-20 13:20:01 +02002824 return ret;
2825}
2826
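/*
 * Cache the SSID IE found at @offset inside @skb in the vif. The IE is
 * laid out as [EID=0][len][ssid bytes], so ptr[1] is the SSID length and
 * the payload starts at ptr + 2; a zero-length (hidden) SSID is accepted
 * and simply leaves ssid_len at 0.
 */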
2827static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2828 int offset)
2829{
2830 u8 ssid_len;
2831 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2832 skb->len - offset);
2833
2834 if (!ptr) {
2835 wl1271_error("No SSID in IEs!");
2836 return -ENOENT;
2837 }
2838
2839 ssid_len = ptr[1];
2840 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2841 wl1271_error("SSID is too long!");
2842 return -EINVAL;
2843 }
2844
2845 wlvif->ssid_len = ssid_len;
2846 memcpy(wlvif->ssid, ptr+2, ssid_len);
2847 return 0;
2848}
2849
2850static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2851{
2852 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2853 struct sk_buff *skb;
2854 int ieoffset;
2855
2856 /* we currently only support setting the ssid from the ap probe req */
2857 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2858 return -EINVAL;
2859
2860 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2861 if (!skb)
2862 return -EINVAL;
2863
2864 ieoffset = offsetof(struct ieee80211_mgmt,
2865 u.probe_req.variable);
2866 wl1271_ssid_set(wlvif, skb, ieoffset);
2867 dev_kfree_skb(skb);
2868
2869 return 0;
2870}
2871
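/*
 * Apply post-association state: cache the AID, beacon interval and QoS
 * flag, rebuild the ps-poll, probe-request and keep-alive templates,
 * enable connection monitoring, and sync the PSM mode and rate policies
 * with what mac80211 reported.
 */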
2872static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
Eliad Pellerec870112012-11-20 13:20:09 +02002873 struct ieee80211_bss_conf *bss_conf,
2874 u32 sta_rate_set)
Eliad Peller3230f352012-11-20 13:20:01 +02002875{
2876 int ieoffset;
2877 int ret;
2878
2879 wlvif->aid = bss_conf->aid;
Luciano Coelhoaaabee82012-12-04 16:39:47 +02002880 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
Eliad Peller3230f352012-11-20 13:20:01 +02002881 wlvif->beacon_int = bss_conf->beacon_int;
Eliad Pellerd50529c2012-11-22 18:06:20 +02002882 wlvif->wmm_enabled = bss_conf->qos;
Eliad Peller3230f352012-11-20 13:20:01 +02002883
2884 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2885
2886 /*
2887 * with wl1271, we don't need to update the
2888 * beacon_int and dtim_period, because the firmware
2889	 * updates them by itself when the first beacon is
2890 * received after a join.
2891 */
2892 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2893 if (ret < 0)
2894 return ret;
2895
2896 /*
2897 * Get a template for hardware connection maintenance
2898 */
2899 dev_kfree_skb(wlvif->probereq);
2900 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2901 wlvif,
2902 NULL);
2903 ieoffset = offsetof(struct ieee80211_mgmt,
2904 u.probe_req.variable);
2905 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2906
2907 /* enable the connection monitoring feature */
2908 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2909 if (ret < 0)
2910 return ret;
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002911
2912 /*
2913	 * The join command disables the keep-alive mode, shuts down its process,
2914	 * and also clears the template config, so we need to reset it all after
2915 * the join. The acx_aid starts the keep-alive process, and the order
2916 * of the commands below is relevant.
2917 */
Eliad Peller0603d892011-10-05 11:55:51 +02002918 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002919 if (ret < 0)
Eliad Peller3230f352012-11-20 13:20:01 +02002920 return ret;
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002921
Eliad Peller0603d892011-10-05 11:55:51 +02002922 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002923 if (ret < 0)
Eliad Peller3230f352012-11-20 13:20:01 +02002924 return ret;
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002925
Eliad Pellerd2d66c52011-10-05 11:55:43 +02002926 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002927 if (ret < 0)
Eliad Peller3230f352012-11-20 13:20:01 +02002928 return ret;
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002929
Eliad Peller0603d892011-10-05 11:55:51 +02002930 ret = wl1271_acx_keep_alive_config(wl, wlvif,
Eliad Peller001e39a2012-08-16 13:52:47 +03002931 wlvif->sta.klv_template_id,
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002932 ACX_KEEP_ALIVE_TPL_VALID);
2933 if (ret < 0)
Eliad Peller3230f352012-11-20 13:20:01 +02002934 return ret;
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002935
Eliad Peller6c7b5192012-11-20 13:20:07 +02002936 /*
2937	 * The default fw psm configuration is AUTO, while mac80211's default
2938 * setting is off (ACTIVE), so sync the fw with the correct value.
2939 */
2940 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
Eliad Pellerec870112012-11-20 13:20:09 +02002941 if (ret < 0)
2942 return ret;
2943
2944 if (sta_rate_set) {
2945 wlvif->rate_set =
2946 wl1271_tx_enabled_rates_get(wl,
2947 sta_rate_set,
2948 wlvif->band);
2949 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2950 if (ret < 0)
2951 return ret;
2952 }
Eliad Peller6c7b5192012-11-20 13:20:07 +02002953
Juuso Oikarinen82429d32010-04-28 09:50:01 +03002954 return ret;
2955}
2956
Eliad Peller3230f352012-11-20 13:20:01 +02002957static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
Luciano Coelhoc7f43e42009-12-11 15:40:44 +02002958{
2959 int ret;
Eliad Peller3230f352012-11-20 13:20:01 +02002960 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2961
2962	/* make sure we are associated (sta) */
2963 if (sta &&
2964 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2965 return false;
2966
2967 /* make sure we are joined (ibss) */
2968 if (!sta &&
2969 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2970 return false;
2971
2972 if (sta) {
2973 /* use defaults when not associated */
2974 wlvif->aid = 0;
2975
2976 /* free probe-request template */
2977 dev_kfree_skb(wlvif->probereq);
2978 wlvif->probereq = NULL;
2979
2980 /* disable connection monitor features */
2981 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2982 if (ret < 0)
2983 return ret;
2984
2985 /* Disable the keep-alive feature */
2986 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2987 if (ret < 0)
2988 return ret;
Eliad Pellerd881fa22014-02-10 13:47:33 +02002989
2990 /* disable beacon filtering */
2991 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
2992 if (ret < 0)
2993 return ret;
Eliad Peller3230f352012-11-20 13:20:01 +02002994 }
Luciano Coelhoc7f43e42009-12-11 15:40:44 +02002995
Eliad Peller52630c52011-10-10 10:13:08 +02002996 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
Eliad Peller6e8cd332011-10-10 10:13:13 +02002997 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2998
Eliad Pellerfcab1892012-11-22 18:06:18 +02002999 wl12xx_cmd_stop_channel_switch(wl, wlvif);
Eliad Peller6e8cd332011-10-10 10:13:13 +02003000 ieee80211_chswitch_done(vif, false);
Eliad Pellerc50a2822012-11-22 18:06:19 +02003001 cancel_delayed_work(&wlvif->channel_switch_work);
Shahar Levi6d158ff2011-09-08 13:01:33 +03003002 }
3003
Eliad Peller4137c172012-08-16 13:32:32 +03003004 /* invalidate keep-alive template */
3005 wl1271_acx_keep_alive_config(wl, wlvif,
Eliad Peller001e39a2012-08-16 13:52:47 +03003006 wlvif->sta.klv_template_id,
Eliad Peller4137c172012-08-16 13:32:32 +03003007 ACX_KEEP_ALIVE_TPL_INVALID);
3008
Eliad Peller3230f352012-11-20 13:20:01 +02003009 return 0;
Luciano Coelhoc7f43e42009-12-11 15:40:44 +02003010}
3011
Eliad Peller87fbcb02011-10-05 11:55:41 +02003012static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
Juuso Oikarinenebba60c2010-04-01 11:38:20 +03003013{
Eliad Peller1b92f152011-10-10 10:13:09 +02003014 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
Eliad Peller30d0c8f2011-10-05 11:55:42 +02003015 wlvif->rate_set = wlvif->basic_rate_set;
Juuso Oikarinenebba60c2010-04-01 11:38:20 +03003016}
3017
Arik Nemtsovb0ed8a42013-09-17 18:41:23 +03003018static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3019 bool idle)
3020{
3021 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3022
3023 if (idle == cur_idle)
3024 return;
3025
3026 if (idle) {
3027 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3028 } else {
3029 /* The current firmware only supports sched_scan in idle */
3030 if (wl->sched_vif == wlvif)
3031 wl->ops->sched_scan_stop(wl, wlvif);
3032
3033 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3034 }
3035}
3036
Eliad Peller9f259c42011-10-10 10:13:12 +02003037static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3038 struct ieee80211_conf *conf, u32 changed)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003039{
Eliad Pellerb6970ee2012-11-20 13:20:05 +02003040 int ret;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003041
Eliad Peller6bd65022011-10-10 10:13:11 +02003042 if (conf->power_level != wlvif->power_level) {
Eliad Peller0603d892011-10-05 11:55:51 +02003043 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003044 if (ret < 0)
Eliad Peller9f259c42011-10-10 10:13:12 +02003045 return ret;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003046
Eliad Peller6bd65022011-10-10 10:13:11 +02003047 wlvif->power_level = conf->power_level;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003048 }
3049
Eliad Peller9f259c42011-10-10 10:13:12 +02003050 return 0;
3051}
3052
3053static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3054{
3055 struct wl1271 *wl = hw->priv;
3056 struct wl12xx_vif *wlvif;
3057 struct ieee80211_conf *conf = &hw->conf;
Eliad Pellerb6970ee2012-11-20 13:20:05 +02003058 int ret = 0;
Eliad Peller9f259c42011-10-10 10:13:12 +02003059
Eliad Pellerb6970ee2012-11-20 13:20:05 +02003060 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
Eliad Peller9f259c42011-10-10 10:13:12 +02003061 " changed 0x%x",
Eliad Peller9f259c42011-10-10 10:13:12 +02003062 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3063 conf->power_level,
3064 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3065 changed);
3066
Eliad Peller9f259c42011-10-10 10:13:12 +02003067 mutex_lock(&wl->mutex);
3068
Eliad Peller9f259c42011-10-10 10:13:12 +02003069 if (changed & IEEE80211_CONF_CHANGE_POWER)
3070 wl->power_level = conf->power_level;
3071
Ido Yariv4cc53382012-07-24 19:18:49 +03003072 if (unlikely(wl->state != WLCORE_STATE_ON))
Eliad Peller9f259c42011-10-10 10:13:12 +02003073 goto out;
3074
3075 ret = wl1271_ps_elp_wakeup(wl);
3076 if (ret < 0)
3077 goto out;
3078
3079 /* configure each interface */
3080 wl12xx_for_each_wlvif(wl, wlvif) {
3081 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3082 if (ret < 0)
3083 goto out_sleep;
3084 }
3085
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003086out_sleep:
3087 wl1271_ps_elp_sleep(wl);
3088
3089out:
3090 mutex_unlock(&wl->mutex);
3091
3092 return ret;
3093}
3094
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003095struct wl1271_filter_params {
3096 bool enabled;
3097 int mc_list_length;
3098 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3099};
3100
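/*
 * Snapshot the multicast list into a heap-allocated filter description
 * that wl1271_op_configure_filter() consumes and frees. The GFP_ATOMIC
 * allocation suggests this may run in atomic context; if the list
 * exceeds ACX_MC_ADDRESS_GROUP_MAX entries, multicast filtering is
 * simply disabled.
 */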
Jiri Pirko22bedad32010-04-01 21:22:57 +00003101static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3102 struct netdev_hw_addr_list *mc_list)
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003103{
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003104 struct wl1271_filter_params *fp;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003105 struct netdev_hw_addr *ha;
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003106
Juuso Oikarinen74441132009-10-13 12:47:53 +03003107 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003108 if (!fp) {
3109 wl1271_error("Out of memory setting filters.");
3110 return 0;
3111 }
3112
3113 /* update multicast filtering parameters */
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003114 fp->mc_list_length = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003115 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3116 fp->enabled = false;
3117 } else {
3118 fp->enabled = true;
3119 netdev_hw_addr_list_for_each(ha, mc_list) {
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003120 memcpy(fp->mc_list[fp->mc_list_length],
Jiri Pirko22bedad32010-04-01 21:22:57 +00003121 ha->addr, ETH_ALEN);
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003122 fp->mc_list_length++;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003123 }
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003124 }
3125
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003126 return (u64)(unsigned long)fp;
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003127}
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003128
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003129#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3130 FIF_ALLMULTI | \
3131 FIF_FCSFAIL | \
3132 FIF_BCN_PRBRESP_PROMISC | \
3133 FIF_CONTROL | \
3134 FIF_OTHER_BSS)
3135
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003136static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3137 unsigned int changed,
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03003138 unsigned int *total, u64 multicast)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003139{
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003140 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003141 struct wl1271 *wl = hw->priv;
Eliad Peller6e8cd332011-10-10 10:13:13 +02003142 struct wl12xx_vif *wlvif;
Eliad Peller536129c2011-10-05 11:55:45 +02003143
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003144 int ret;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003145
Arik Nemtsov7d057862010-10-16 19:25:35 +02003146 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3147 " total %x", changed, *total);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003148
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003149 mutex_lock(&wl->mutex);
3150
Saravanan Dhanabal2c10bb92010-04-09 11:07:27 +03003151 *total &= WL1271_SUPPORTED_FILTERS;
3152 changed &= WL1271_SUPPORTED_FILTERS;
3153
Ido Yariv4cc53382012-07-24 19:18:49 +03003154 if (unlikely(wl->state != WLCORE_STATE_ON))
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003155 goto out;
3156
Ido Yariva6208652011-03-01 15:14:41 +02003157 ret = wl1271_ps_elp_wakeup(wl);
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003158 if (ret < 0)
3159 goto out;
3160
Eliad Peller6e8cd332011-10-10 10:13:13 +02003161 wl12xx_for_each_wlvif(wl, wlvif) {
3162 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3163 if (*total & FIF_ALLMULTI)
3164 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3165 false,
3166 NULL, 0);
3167 else if (fp)
3168 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3169 fp->enabled,
3170 fp->mc_list,
3171 fp->mc_list_length);
3172 if (ret < 0)
3173 goto out_sleep;
3174 }
Arik Nemtsov7d057862010-10-16 19:25:35 +02003175 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003176
Eliad Peller08c1d1c2011-08-14 13:17:04 +03003177 /*
3178	 * the fw doesn't provide an api to configure the filters. Instead,
3179	 * the filter configuration is based on the active roles / ROC
3180 * state.
3181 */
Juuso Oikarinenb54853f2009-10-13 12:47:59 +03003182
3183out_sleep:
3184 wl1271_ps_elp_sleep(wl);
3185
3186out:
3187 mutex_unlock(&wl->mutex);
Juuso Oikarinen14b228a2010-03-18 12:26:43 +02003188 kfree(fp);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003189}
3190
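/*
 * Keys configured before the AP role has started are not pushed to the
 * firmware immediately; they are recorded here and replayed by
 * wl1271_ap_init_hwenc() once the AP is up.
 */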
Eliad Peller170d0e62011-10-05 11:56:06 +02003191static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3192 u8 id, u8 key_type, u8 key_size,
3193 const u8 *key, u8 hlid, u32 tx_seq_32,
3194 u16 tx_seq_16)
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003195{
3196 struct wl1271_ap_key *ap_key;
3197 int i;
3198
3199 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3200
3201 if (key_size > MAX_KEY_SIZE)
3202 return -EINVAL;
3203
3204 /*
3205 * Find next free entry in ap_keys. Also check we are not replacing
3206 * an existing key.
3207 */
3208 for (i = 0; i < MAX_NUM_KEYS; i++) {
Eliad Peller170d0e62011-10-05 11:56:06 +02003209 if (wlvif->ap.recorded_keys[i] == NULL)
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003210 break;
3211
Eliad Peller170d0e62011-10-05 11:56:06 +02003212 if (wlvif->ap.recorded_keys[i]->id == id) {
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003213 wl1271_warning("trying to record key replacement");
3214 return -EINVAL;
3215 }
3216 }
3217
3218 if (i == MAX_NUM_KEYS)
3219 return -EBUSY;
3220
3221 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3222 if (!ap_key)
3223 return -ENOMEM;
3224
3225 ap_key->id = id;
3226 ap_key->key_type = key_type;
3227 ap_key->key_size = key_size;
3228 memcpy(ap_key->key, key, key_size);
3229 ap_key->hlid = hlid;
3230 ap_key->tx_seq_32 = tx_seq_32;
3231 ap_key->tx_seq_16 = tx_seq_16;
3232
Eliad Peller170d0e62011-10-05 11:56:06 +02003233 wlvif->ap.recorded_keys[i] = ap_key;
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003234 return 0;
3235}
3236
Eliad Peller170d0e62011-10-05 11:56:06 +02003237static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003238{
3239 int i;
3240
3241 for (i = 0; i < MAX_NUM_KEYS; i++) {
Eliad Peller170d0e62011-10-05 11:56:06 +02003242 kfree(wlvif->ap.recorded_keys[i]);
3243 wlvif->ap.recorded_keys[i] = NULL;
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003244 }
3245}
3246
Eliad Pellera8ab39a2011-10-05 11:55:54 +02003247static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003248{
3249 int i, ret = 0;
3250 struct wl1271_ap_key *key;
3251 bool wep_key_added = false;
3252
3253 for (i = 0; i < MAX_NUM_KEYS; i++) {
Eliad Peller7f97b482011-08-14 13:17:30 +03003254 u8 hlid;
Eliad Peller170d0e62011-10-05 11:56:06 +02003255 if (wlvif->ap.recorded_keys[i] == NULL)
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003256 break;
3257
Eliad Peller170d0e62011-10-05 11:56:06 +02003258 key = wlvif->ap.recorded_keys[i];
Eliad Peller7f97b482011-08-14 13:17:30 +03003259 hlid = key->hlid;
3260 if (hlid == WL12XX_INVALID_LINK_ID)
Eliad Pellera8ab39a2011-10-05 11:55:54 +02003261 hlid = wlvif->ap.bcast_hlid;
Eliad Peller7f97b482011-08-14 13:17:30 +03003262
Eliad Pellera8ab39a2011-10-05 11:55:54 +02003263 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003264 key->id, key->key_type,
3265 key->key_size, key->key,
Eliad Peller7f97b482011-08-14 13:17:30 +03003266 hlid, key->tx_seq_32,
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003267 key->tx_seq_16);
3268 if (ret < 0)
3269 goto out;
3270
3271 if (key->key_type == KEY_WEP)
3272 wep_key_added = true;
3273 }
3274
3275 if (wep_key_added) {
Eliad Pellerf75c753f2011-10-05 11:55:59 +02003276 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
Eliad Pellera8ab39a2011-10-05 11:55:54 +02003277 wlvif->ap.bcast_hlid);
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003278 if (ret < 0)
3279 goto out;
3280 }
3281
3282out:
Eliad Peller170d0e62011-10-05 11:56:06 +02003283 wl1271_free_ap_keys(wl, wlvif);
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003284 return ret;
3285}
3286
Eliad Peller536129c2011-10-05 11:55:45 +02003287static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3288 u16 action, u8 id, u8 key_type,
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003289 u8 key_size, const u8 *key, u32 tx_seq_32,
3290 u16 tx_seq_16, struct ieee80211_sta *sta)
3291{
3292 int ret;
Eliad Peller536129c2011-10-05 11:55:45 +02003293 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003294
3295 if (is_ap) {
3296 struct wl1271_station *wl_sta;
3297 u8 hlid;
3298
3299 if (sta) {
3300 wl_sta = (struct wl1271_station *)sta->drv_priv;
3301 hlid = wl_sta->hlid;
3302 } else {
Eliad Pellera8ab39a2011-10-05 11:55:54 +02003303 hlid = wlvif->ap.bcast_hlid;
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003304 }
3305
Eliad Peller53d40d02011-10-10 10:13:02 +02003306 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003307 /*
3308 * We do not support removing keys after AP shutdown.
3309 * Pretend we do to make mac80211 happy.
3310 */
3311 if (action != KEY_ADD_OR_REPLACE)
3312 return 0;
3313
Eliad Peller170d0e62011-10-05 11:56:06 +02003314 ret = wl1271_record_ap_key(wl, wlvif, id,
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003315 key_type, key_size,
3316 key, hlid, tx_seq_32,
3317 tx_seq_16);
3318 } else {
Eliad Pellera8ab39a2011-10-05 11:55:54 +02003319 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003320 id, key_type, key_size,
3321 key, hlid, tx_seq_32,
3322 tx_seq_16);
3323 }
3324
3325 if (ret < 0)
3326 return ret;
3327 } else {
3328 const u8 *addr;
3329 static const u8 bcast_addr[ETH_ALEN] = {
3330 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3331 };
3332
3333 addr = sta ? sta->addr : bcast_addr;
3334
3335 if (is_zero_ether_addr(addr)) {
3336	/* We don't support TX-only encryption */
3337 return -EOPNOTSUPP;
3338 }
3339
3340	/* The wl1271 does not allow removing unicast keys - they
3341	 will be cleared automatically on the next CMD_JOIN. Ignore the
3342	 request silently, as we don't want mac80211 to emit
3343	 an error message. */
3344 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3345 return 0;
3346
Eliad Peller010d3d32011-08-14 13:17:31 +03003347 /* don't remove key if hlid was already deleted */
3348 if (action == KEY_REMOVE &&
Eliad Peller154da672011-10-05 11:55:53 +02003349 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
Eliad Peller010d3d32011-08-14 13:17:31 +03003350 return 0;
3351
Eliad Pellera8ab39a2011-10-05 11:55:54 +02003352 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003353 id, key_type, key_size,
3354 key, addr, tx_seq_32,
3355 tx_seq_16);
3356 if (ret < 0)
3357 return ret;
3358
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003359 }
3360
3361 return 0;
3362}
3363
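/*
 * mac80211 set_key entry point. GEM and TKIP may change the number of
 * spare TX blocks the firmware reserves per packet, so the TX queues are
 * stopped and flushed around such key changes to keep the driver's block
 * accounting in sync with the firmware.
 */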
Arik Nemtsova1c597f2012-05-18 07:46:40 +03003364static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003365 struct ieee80211_vif *vif,
3366 struct ieee80211_sta *sta,
3367 struct ieee80211_key_conf *key_conf)
3368{
3369 struct wl1271 *wl = hw->priv;
Eliad Pelleraf390f42012-09-03 18:27:58 +03003370 int ret;
3371 bool might_change_spare =
3372 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3373 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
Arik Nemtsova1c597f2012-05-18 07:46:40 +03003374
Eliad Pelleraf390f42012-09-03 18:27:58 +03003375 if (might_change_spare) {
3376 /*
3377 * stop the queues and flush to ensure the next packets are
3378 * in sync with FW spare block accounting
3379 */
Eliad Pelleraf390f42012-09-03 18:27:58 +03003380 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
Eliad Pelleraf390f42012-09-03 18:27:58 +03003381 wl1271_tx_flush(wl);
3382 }
3383
3384 mutex_lock(&wl->mutex);
3385
3386 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3387 ret = -EAGAIN;
3388 goto out_wake_queues;
3389 }
3390
3391 ret = wl1271_ps_elp_wakeup(wl);
3392 if (ret < 0)
3393 goto out_wake_queues;
3394
3395 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3396
3397 wl1271_ps_elp_sleep(wl);
3398
3399out_wake_queues:
3400 if (might_change_spare)
3401 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3402
3403 mutex_unlock(&wl->mutex);
3404
3405 return ret;
Arik Nemtsova1c597f2012-05-18 07:46:40 +03003406}
3407
3408int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3409 struct ieee80211_vif *vif,
3410 struct ieee80211_sta *sta,
3411 struct ieee80211_key_conf *key_conf)
3412{
Eliad Peller536129c2011-10-05 11:55:45 +02003413 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003414 int ret;
Juuso Oikarinenac4e4ce2009-10-08 21:56:19 +03003415 u32 tx_seq_32 = 0;
3416 u16 tx_seq_16 = 0;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003417 u8 key_type;
Arik Nemtsov93d5d102013-03-12 17:19:38 +02003418 u8 hlid;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003419
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003420 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3421
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003422 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003423 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
Johannes Berg97359d12010-08-10 09:46:38 +02003424 key_conf->cipher, key_conf->keyidx,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003425 key_conf->keylen, key_conf->flags);
3426 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3427
Arik Nemtsov93d5d102013-03-12 17:19:38 +02003428 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3429 if (sta) {
3430 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3431 hlid = wl_sta->hlid;
3432 } else {
3433 hlid = wlvif->ap.bcast_hlid;
3434 }
3435 else
3436 hlid = wlvif->sta.hlid;
3437
3438 if (hlid != WL12XX_INVALID_LINK_ID) {
3439 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3440 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3441 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3442 }
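	/*
	 * Worked example (assuming the usual WL1271_TX_SECURITY_* helpers:
	 * LO16 masks the low 16 bits, HI32 shifts right by 16): a
	 * total_freed_pkts value of 0x123456789abc is handed to the
	 * firmware as tx_seq_32 = 0x12345678 and tx_seq_16 = 0x9abc.
	 */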
3443
Johannes Berg97359d12010-08-10 09:46:38 +02003444 switch (key_conf->cipher) {
3445 case WLAN_CIPHER_SUITE_WEP40:
3446 case WLAN_CIPHER_SUITE_WEP104:
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003447 key_type = KEY_WEP;
3448
3449 key_conf->hw_key_idx = key_conf->keyidx;
3450 break;
Johannes Berg97359d12010-08-10 09:46:38 +02003451 case WLAN_CIPHER_SUITE_TKIP:
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003452 key_type = KEY_TKIP;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003453 key_conf->hw_key_idx = key_conf->keyidx;
3454 break;
Johannes Berg97359d12010-08-10 09:46:38 +02003455 case WLAN_CIPHER_SUITE_CCMP:
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003456 key_type = KEY_AES;
Arik Nemtsov12d4b972011-10-23 08:21:54 +02003457 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003458 break;
Juuso Oikarinen7a557242010-09-27 12:42:07 +02003459 case WL1271_CIPHER_SUITE_GEM:
3460 key_type = KEY_GEM;
Juuso Oikarinen7a557242010-09-27 12:42:07 +02003461 break;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003462 default:
Johannes Berg97359d12010-08-10 09:46:38 +02003463 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003464
Eliad Pelleraf390f42012-09-03 18:27:58 +03003465 return -EOPNOTSUPP;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003466 }
3467
3468 switch (cmd) {
3469 case SET_KEY:
Eliad Peller536129c2011-10-05 11:55:45 +02003470 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003471 key_conf->keyidx, key_type,
3472 key_conf->keylen, key_conf->key,
3473 tx_seq_32, tx_seq_16, sta);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003474 if (ret < 0) {
3475 wl1271_error("Could not add or replace key");
Eliad Pelleraf390f42012-09-03 18:27:58 +03003476 return ret;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003477 }
Eliad Peller5ec8a442012-02-02 12:22:09 +02003478
3479 /*
3480 * reconfiguring arp response if the unicast (or common)
3481 * encryption key type was changed
3482 */
3483 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3484 (sta || key_type == KEY_WEP) &&
3485 wlvif->encryption_type != key_type) {
3486 wlvif->encryption_type = key_type;
3487 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3488 if (ret < 0) {
3489 wl1271_warning("build arp rsp failed: %d", ret);
Eliad Pelleraf390f42012-09-03 18:27:58 +03003490 return ret;
Eliad Peller5ec8a442012-02-02 12:22:09 +02003491 }
3492 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003493 break;
3494
3495 case DISABLE_KEY:
Eliad Peller536129c2011-10-05 11:55:45 +02003496 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
Arik Nemtsov7f179b42010-10-16 21:39:06 +02003497 key_conf->keyidx, key_type,
3498 key_conf->keylen, key_conf->key,
3499 0, 0, sta);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003500 if (ret < 0) {
3501 wl1271_error("Could not remove key");
Eliad Pelleraf390f42012-09-03 18:27:58 +03003502 return ret;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003503 }
3504 break;
3505
3506 default:
3507 wl1271_error("Unsupported key cmd 0x%x", cmd);
Eliad Pelleraf390f42012-09-03 18:27:58 +03003508 return -EOPNOTSUPP;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003509 }
3510
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003511 return ret;
3512}
Arik Nemtsova1c597f2012-05-18 07:46:40 +03003513EXPORT_SYMBOL_GPL(wlcore_set_key);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003514
Yoni Divinskyba1e6eb2013-05-12 12:35:28 +03003515static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3516 struct ieee80211_vif *vif,
3517 int key_idx)
3518{
3519 struct wl1271 *wl = hw->priv;
3520 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3521 int ret;
3522
3523 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3524 key_idx);
3525
Eliad Pellerbf4e5f12014-02-10 13:47:29 +02003526 /* we don't handle unsetting of default key */
3527 if (key_idx == -1)
3528 return;
3529
Yoni Divinskyba1e6eb2013-05-12 12:35:28 +03003530 mutex_lock(&wl->mutex);
3531
3532 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3533 ret = -EAGAIN;
3534 goto out_unlock;
3535 }
3536
3537 ret = wl1271_ps_elp_wakeup(wl);
3538 if (ret < 0)
3539 goto out_unlock;
3540
3541 wlvif->default_key = key_idx;
3542
3543 /* the default WEP key needs to be configured at least once */
3544 if (wlvif->encryption_type == KEY_WEP) {
3545 ret = wl12xx_cmd_set_default_wep_key(wl,
3546 key_idx,
3547 wlvif->sta.hlid);
3548 if (ret < 0)
3549 goto out_sleep;
3550 }
3551
3552out_sleep:
3553 wl1271_ps_elp_sleep(wl);
3554
3555out_unlock:
3556 mutex_unlock(&wl->mutex);
3557}
3558
Victor Goldenshtein6b70e7e2012-11-25 18:26:59 +02003559void wlcore_regdomain_config(struct wl1271 *wl)
3560{
3561 int ret;
3562
3563 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3564 return;
3565
3566 mutex_lock(&wl->mutex);
Arik Nemtsov75592be2013-03-12 17:19:45 +02003567
3568 if (unlikely(wl->state != WLCORE_STATE_ON))
3569 goto out;
3570
Victor Goldenshtein6b70e7e2012-11-25 18:26:59 +02003571 ret = wl1271_ps_elp_wakeup(wl);
3572 if (ret < 0)
3573 goto out;
3574
3575 ret = wlcore_cmd_regdomain_config_locked(wl);
3576 if (ret < 0) {
3577 wl12xx_queue_recovery_work(wl);
3578 goto out;
3579 }
3580
3581 wl1271_ps_elp_sleep(wl);
3582out:
3583 mutex_unlock(&wl->mutex);
3584}
3585
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003586static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
Johannes Berga060bbf2010-04-27 11:59:34 +02003587 struct ieee80211_vif *vif,
David Spinadelc56ef672014-02-05 15:21:13 +02003588 struct ieee80211_scan_request *hw_req)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003589{
David Spinadelc56ef672014-02-05 15:21:13 +02003590 struct cfg80211_scan_request *req = &hw_req->req;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003591 struct wl1271 *wl = hw->priv;
3592 int ret;
3593 u8 *ssid = NULL;
Teemu Paasikiviabb0b3b2009-10-13 12:47:50 +03003594 size_t len = 0;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003595
3596 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3597
3598 if (req->n_ssids) {
3599 ssid = req->ssids[0].ssid;
Teemu Paasikiviabb0b3b2009-10-13 12:47:50 +03003600 len = req->ssids[0].ssid_len;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003601 }
3602
3603 mutex_lock(&wl->mutex);
3604
Ido Yariv4cc53382012-07-24 19:18:49 +03003605 if (unlikely(wl->state != WLCORE_STATE_ON)) {
Juuso Oikarinenb739a422010-10-26 13:24:38 +02003606 /*
3607 * We cannot return -EBUSY here because cfg80211 will expect
3608 * a call to ieee80211_scan_completed if we do - in this case
3609 * there won't be any call.
3610 */
3611 ret = -EAGAIN;
3612 goto out;
3613 }
3614
Ido Yariva6208652011-03-01 15:14:41 +02003615 ret = wl1271_ps_elp_wakeup(wl);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003616 if (ret < 0)
3617 goto out;
3618
Eliad Peller97fd3112012-03-04 10:55:52 +02003619 /* fail if there is any role in ROC */
3620 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
Eliad Peller92e712d2011-12-18 20:25:43 +02003621 /* don't allow scanning right now */
3622 ret = -EBUSY;
3623 goto out_sleep;
Eliad Peller251c1772011-08-14 13:17:17 +03003624 }
3625
Eliad Peller78e28062012-11-22 18:06:15 +02003626 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
Eliad Peller251c1772011-08-14 13:17:17 +03003627out_sleep:
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003628 wl1271_ps_elp_sleep(wl);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003629out:
3630 mutex_unlock(&wl->mutex);
3631
3632 return ret;
3633}
3634
Eliad Peller73ecce32011-06-27 13:06:45 +03003635static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3636 struct ieee80211_vif *vif)
3637{
3638 struct wl1271 *wl = hw->priv;
Eliad Peller78e28062012-11-22 18:06:15 +02003639 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Eliad Peller73ecce32011-06-27 13:06:45 +03003640 int ret;
3641
3642 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3643
3644 mutex_lock(&wl->mutex);
3645
Ido Yariv4cc53382012-07-24 19:18:49 +03003646 if (unlikely(wl->state != WLCORE_STATE_ON))
Eliad Peller73ecce32011-06-27 13:06:45 +03003647 goto out;
3648
3649 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3650 goto out;
3651
3652 ret = wl1271_ps_elp_wakeup(wl);
3653 if (ret < 0)
3654 goto out;
3655
3656 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
Eliad Peller78e28062012-11-22 18:06:15 +02003657 ret = wl->ops->scan_stop(wl, wlvif);
Eliad Peller73ecce32011-06-27 13:06:45 +03003658 if (ret < 0)
3659 goto out_sleep;
3660 }
Arik Nemtsov55df5af2012-03-03 22:18:00 +02003661
3662 /*
3663 * Rearm the tx watchdog just before idling scan. This
3664 * prevents just-finished scans from triggering the watchdog
3665 */
3666 wl12xx_rearm_tx_watchdog_locked(wl);
3667
Eliad Peller73ecce32011-06-27 13:06:45 +03003668 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3669 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
Eliad Pellerc50a2822012-11-22 18:06:19 +02003670 wl->scan_wlvif = NULL;
Eliad Peller73ecce32011-06-27 13:06:45 +03003671 wl->scan.req = NULL;
3672 ieee80211_scan_completed(wl->hw, true);
3673
3674out_sleep:
3675 wl1271_ps_elp_sleep(wl);
3676out:
3677 mutex_unlock(&wl->mutex);
3678
3679 cancel_delayed_work_sync(&wl->scan_complete_work);
3680}
3681
Luciano Coelho33c2c062011-05-10 14:46:02 +03003682static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3683 struct ieee80211_vif *vif,
3684 struct cfg80211_sched_scan_request *req,
David Spinadel633e2712014-02-06 16:15:23 +02003685 struct ieee80211_scan_ies *ies)
Luciano Coelho33c2c062011-05-10 14:46:02 +03003686{
3687 struct wl1271 *wl = hw->priv;
Eliad Peller536129c2011-10-05 11:55:45 +02003688 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Luciano Coelho33c2c062011-05-10 14:46:02 +03003689 int ret;
3690
3691 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3692
3693 mutex_lock(&wl->mutex);
3694
Ido Yariv4cc53382012-07-24 19:18:49 +03003695 if (unlikely(wl->state != WLCORE_STATE_ON)) {
Pontus Fuchs9e0dc892012-01-11 14:22:42 +01003696 ret = -EAGAIN;
3697 goto out;
3698 }
3699
Luciano Coelho33c2c062011-05-10 14:46:02 +03003700 ret = wl1271_ps_elp_wakeup(wl);
3701 if (ret < 0)
3702 goto out;
3703
Eliad Peller78e28062012-11-22 18:06:15 +02003704 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
Luciano Coelho33c2c062011-05-10 14:46:02 +03003705 if (ret < 0)
3706 goto out_sleep;
3707
Eliad Peller10199752012-11-22 18:06:23 +02003708 wl->sched_vif = wlvif;
Luciano Coelho33c2c062011-05-10 14:46:02 +03003709
3710out_sleep:
3711 wl1271_ps_elp_sleep(wl);
3712out:
3713 mutex_unlock(&wl->mutex);
3714 return ret;
3715}
3716
Johannes Berg37e33082014-02-17 10:48:17 +01003717static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3718 struct ieee80211_vif *vif)
Luciano Coelho33c2c062011-05-10 14:46:02 +03003719{
3720 struct wl1271 *wl = hw->priv;
Yoni Divinsky78f85f52012-05-16 11:34:17 +03003721 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Luciano Coelho33c2c062011-05-10 14:46:02 +03003722 int ret;
3723
3724 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3725
3726 mutex_lock(&wl->mutex);
3727
Ido Yariv4cc53382012-07-24 19:18:49 +03003728 if (unlikely(wl->state != WLCORE_STATE_ON))
Pontus Fuchs9e0dc892012-01-11 14:22:42 +01003729 goto out;
3730
Luciano Coelho33c2c062011-05-10 14:46:02 +03003731 ret = wl1271_ps_elp_wakeup(wl);
3732 if (ret < 0)
3733 goto out;
3734
Eliad Peller78e28062012-11-22 18:06:15 +02003735 wl->ops->sched_scan_stop(wl, wlvif);
Luciano Coelho33c2c062011-05-10 14:46:02 +03003736
3737 wl1271_ps_elp_sleep(wl);
3738out:
3739 mutex_unlock(&wl->mutex);
Johannes Berg37e33082014-02-17 10:48:17 +01003740
3741 return 0;
Luciano Coelho33c2c062011-05-10 14:46:02 +03003742}
3743
Arik Nemtsov68d069c2010-11-08 10:51:07 +01003744static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3745{
3746 struct wl1271 *wl = hw->priv;
3747 int ret = 0;
3748
3749 mutex_lock(&wl->mutex);
3750
Ido Yariv4cc53382012-07-24 19:18:49 +03003751 if (unlikely(wl->state != WLCORE_STATE_ON)) {
Arik Nemtsov68d069c2010-11-08 10:51:07 +01003752 ret = -EAGAIN;
3753 goto out;
3754 }
3755
Ido Yariva6208652011-03-01 15:14:41 +02003756 ret = wl1271_ps_elp_wakeup(wl);
Arik Nemtsov68d069c2010-11-08 10:51:07 +01003757 if (ret < 0)
3758 goto out;
3759
Arik Nemtsov5f704d12011-04-18 14:15:21 +03003760 ret = wl1271_acx_frag_threshold(wl, value);
Arik Nemtsov68d069c2010-11-08 10:51:07 +01003761 if (ret < 0)
3762 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3763
3764 wl1271_ps_elp_sleep(wl);
3765
3766out:
3767 mutex_unlock(&wl->mutex);
3768
3769 return ret;
3770}
3771
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003772static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3773{
3774 struct wl1271 *wl = hw->priv;
Eliad Peller6e8cd332011-10-10 10:13:13 +02003775 struct wl12xx_vif *wlvif;
Saravanan Dhanabalaecb0562010-04-09 11:07:28 +03003776 int ret = 0;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003777
3778 mutex_lock(&wl->mutex);
3779
Ido Yariv4cc53382012-07-24 19:18:49 +03003780 if (unlikely(wl->state != WLCORE_STATE_ON)) {
Juuso Oikarinenf8d98022010-10-26 13:24:39 +02003781 ret = -EAGAIN;
Saravanan Dhanabalaecb0562010-04-09 11:07:28 +03003782 goto out;
Juuso Oikarinenf8d98022010-10-26 13:24:39 +02003783 }
Saravanan Dhanabalaecb0562010-04-09 11:07:28 +03003784
Ido Yariva6208652011-03-01 15:14:41 +02003785 ret = wl1271_ps_elp_wakeup(wl);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003786 if (ret < 0)
3787 goto out;
3788
Eliad Peller6e8cd332011-10-10 10:13:13 +02003789 wl12xx_for_each_wlvif(wl, wlvif) {
3790 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3791 if (ret < 0)
3792 wl1271_warning("set rts threshold failed: %d", ret);
3793 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03003794 wl1271_ps_elp_sleep(wl);
3795
3796out:
3797 mutex_unlock(&wl->mutex);
3798
3799 return ret;
3800}
3801
Eliad Pellerd48055d2011-09-15 12:07:04 +03003802static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3803{
3804 int len;
3805 const u8 *next, *end = skb->data + skb->len;
3806 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3807 skb->len - ieoffset);
3808 if (!ie)
3809 return;
3810 len = ie[1] + 2;
3811 next = ie + len;
3812 memmove(ie, next, end - next);
3813 skb_trim(skb, skb->len - len);
3814}
3815
Eliad Peller26b4bf22011-09-15 12:07:05 +03003816static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3817 unsigned int oui, u8 oui_type,
3818 int ieoffset)
3819{
3820 int len;
3821 const u8 *next, *end = skb->data + skb->len;
3822 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3823 skb->data + ieoffset,
3824 skb->len - ieoffset);
3825 if (!ie)
3826 return;
3827 len = ie[1] + 2;
3828 next = ie + len;
3829 memmove(ie, next, end - next);
3830 skb_trim(skb, skb->len - len);
3831}
3832
Arik Nemtsov341f2c12011-11-22 19:52:59 +02003833static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3834 struct ieee80211_vif *vif)
Arik Nemtsov560f002412011-11-08 18:46:54 +02003835{
Eliad Pellercdaac622012-01-31 11:57:16 +02003836 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Arik Nemtsov560f002412011-11-08 18:46:54 +02003837 struct sk_buff *skb;
3838 int ret;
3839
Arik Nemtsov341f2c12011-11-22 19:52:59 +02003840 skb = ieee80211_proberesp_get(wl->hw, vif);
Arik Nemtsov560f002412011-11-08 18:46:54 +02003841 if (!skb)
Arik Nemtsov341f2c12011-11-22 19:52:59 +02003842 return -EOPNOTSUPP;
Arik Nemtsov560f002412011-11-08 18:46:54 +02003843
Eliad Pellercdaac622012-01-31 11:57:16 +02003844 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
Arik Nemtsov560f002412011-11-08 18:46:54 +02003845 CMD_TEMPL_AP_PROBE_RESPONSE,
3846 skb->data,
3847 skb->len, 0,
3848 rates);
Arik Nemtsov560f002412011-11-08 18:46:54 +02003849 dev_kfree_skb(skb);
Luciano Coelho62c2e572012-05-10 12:14:04 +03003850
3851 if (ret < 0)
3852 goto out;
3853
3854 wl1271_debug(DEBUG_AP, "probe response updated");
3855 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3856
3857out:
Arik Nemtsov560f002412011-11-08 18:46:54 +02003858 return ret;
3859}
3860
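/*
 * Build the AP probe response template from beacon data. If the vif has
 * no SSID cached (wlvif->ssid_len == 0, e.g. a hidden-SSID beacon), the
 * SSID from bss_conf is spliced into the template before it is uploaded.
 */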
3861static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3862 struct ieee80211_vif *vif,
3863 u8 *probe_rsp_data,
3864 size_t probe_rsp_len,
3865 u32 rates)
Arik Nemtsov68eaaf62011-09-03 20:22:03 +03003866{
Eliad Peller1fe9f162011-10-05 11:55:48 +02003867 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3868 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
Arik Nemtsov68eaaf62011-09-03 20:22:03 +03003869 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3870 int ssid_ie_offset, ie_offset, templ_len;
3871 const u8 *ptr;
3872
3873 /* no need to change probe response if the SSID is set correctly */
Eliad Peller1fe9f162011-10-05 11:55:48 +02003874 if (wlvif->ssid_len > 0)
Eliad Pellercdaac622012-01-31 11:57:16 +02003875 return wl1271_cmd_template_set(wl, wlvif->role_id,
Arik Nemtsov68eaaf62011-09-03 20:22:03 +03003876 CMD_TEMPL_AP_PROBE_RESPONSE,
3877 probe_rsp_data,
3878 probe_rsp_len, 0,
3879 rates);
3880
3881 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3882 wl1271_error("probe_rsp template too big");
3883 return -EINVAL;
3884 }
3885
3886 /* start searching from IE offset */
3887 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3888
3889 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3890 probe_rsp_len - ie_offset);
3891 if (!ptr) {
3892 wl1271_error("No SSID in beacon!");
3893 return -EINVAL;
3894 }
3895
3896 ssid_ie_offset = ptr - probe_rsp_data;
3897 ptr += (ptr[1] + 2);
3898
3899 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3900
3901 /* insert SSID from bss_conf */
3902 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3903 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3904 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3905 bss_conf->ssid, bss_conf->ssid_len);
3906 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3907
3908 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3909 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3910 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3911
Eliad Pellercdaac622012-01-31 11:57:16 +02003912 return wl1271_cmd_template_set(wl, wlvif->role_id,
Arik Nemtsov68eaaf62011-09-03 20:22:03 +03003913 CMD_TEMPL_AP_PROBE_RESPONSE,
3914 probe_rsp_templ,
3915 templ_len, 0,
3916 rates);
3917}
3918
Arik Nemtsove78a2872010-10-16 19:07:21 +02003919static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
Eliad Peller0603d892011-10-05 11:55:51 +02003920 struct ieee80211_vif *vif,
Arik Nemtsove78a2872010-10-16 19:07:21 +02003921 struct ieee80211_bss_conf *bss_conf,
3922 u32 changed)
3923{
Eliad Peller0603d892011-10-05 11:55:51 +02003924 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Arik Nemtsove78a2872010-10-16 19:07:21 +02003925 int ret = 0;
3926
3927 if (changed & BSS_CHANGED_ERP_SLOT) {
3928 if (bss_conf->use_short_slot)
Eliad Peller0603d892011-10-05 11:55:51 +02003929 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
Arik Nemtsove78a2872010-10-16 19:07:21 +02003930 else
Eliad Peller0603d892011-10-05 11:55:51 +02003931 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
Arik Nemtsove78a2872010-10-16 19:07:21 +02003932 if (ret < 0) {
3933 wl1271_warning("Set slot time failed %d", ret);
3934 goto out;
3935 }
3936 }
3937
3938 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3939 if (bss_conf->use_short_preamble)
Eliad Peller0603d892011-10-05 11:55:51 +02003940 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
Arik Nemtsove78a2872010-10-16 19:07:21 +02003941 else
Eliad Peller0603d892011-10-05 11:55:51 +02003942 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
Arik Nemtsove78a2872010-10-16 19:07:21 +02003943 }
3944
3945 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3946 if (bss_conf->use_cts_prot)
Eliad Peller0603d892011-10-05 11:55:51 +02003947 ret = wl1271_acx_cts_protect(wl, wlvif,
3948 CTSPROTECT_ENABLE);
Arik Nemtsove78a2872010-10-16 19:07:21 +02003949 else
Eliad Peller0603d892011-10-05 11:55:51 +02003950 ret = wl1271_acx_cts_protect(wl, wlvif,
3951 CTSPROTECT_DISABLE);
Arik Nemtsove78a2872010-10-16 19:07:21 +02003952 if (ret < 0) {
3953 wl1271_warning("Set ctsprotect failed %d", ret);
3954 goto out;
3955 }
3956 }
3957
3958out:
3959 return ret;
3960}
3961
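/*
 * Upload the beacon template and, unless usermode already set an
 * explicit probe response (WLVIF_FLAG_AP_PROBE_RESP_SET), derive one
 * from the beacon by stripping the TIM and P2P IEs and rewriting the
 * frame control to PROBE_RESP.
 */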
Luciano Coelho62c2e572012-05-10 12:14:04 +03003962static int wlcore_set_beacon_template(struct wl1271 *wl,
3963 struct ieee80211_vif *vif,
3964 bool is_ap)
3965{
3966 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3967 struct ieee80211_hdr *hdr;
3968 u32 min_rate;
3969 int ret;
Luciano Coelho8f6ac532013-05-04 01:06:11 +03003970 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
Luciano Coelho62c2e572012-05-10 12:14:04 +03003971 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3972 u16 tmpl_id;
3973
3974 if (!beacon) {
3975 ret = -EINVAL;
3976 goto out;
3977 }
3978
3979 wl1271_debug(DEBUG_MASTER, "beacon updated");
3980
Eliad Peller3230f352012-11-20 13:20:01 +02003981 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
Luciano Coelho62c2e572012-05-10 12:14:04 +03003982 if (ret < 0) {
3983 dev_kfree_skb(beacon);
3984 goto out;
3985 }
3986 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3987 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3988 CMD_TEMPL_BEACON;
3989 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3990 beacon->data,
3991 beacon->len, 0,
3992 min_rate);
3993 if (ret < 0) {
3994 dev_kfree_skb(beacon);
3995 goto out;
3996 }
3997
Eliad Pellerd50529c2012-11-22 18:06:20 +02003998 wlvif->wmm_enabled =
3999 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4000 WLAN_OUI_TYPE_MICROSOFT_WMM,
4001 beacon->data + ieoffset,
4002 beacon->len - ieoffset);
4003
Luciano Coelho62c2e572012-05-10 12:14:04 +03004004 /*
 4005	 * In case we already have a probe-resp template set explicitly
 4006	 * by usermode, don't use the beacon data.
4007 */
4008 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4009 goto end_bcn;
4010
4011 /* remove TIM ie from probe response */
4012 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4013
4014 /*
4015 * remove p2p ie from probe response.
 4016	 * the fw responds to probe requests that don't include
 4017	 * the p2p ie. probe requests with p2p ie will be passed,
 4018	 * and will be answered by the supplicant (the spec
4019 * forbids including the p2p ie when responding to probe
4020 * requests that didn't include it).
4021 */
4022 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4023 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4024
4025 hdr = (struct ieee80211_hdr *) beacon->data;
4026 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4027 IEEE80211_STYPE_PROBE_RESP);
4028 if (is_ap)
4029 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4030 beacon->data,
4031 beacon->len,
4032 min_rate);
4033 else
4034 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4035 CMD_TEMPL_PROBE_RESPONSE,
4036 beacon->data,
4037 beacon->len, 0,
4038 min_rate);
4039end_bcn:
4040 dev_kfree_skb(beacon);
4041 if (ret < 0)
4042 goto out;
4043
4044out:
4045 return ret;
4046}
4047
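/* Handle beacon interval, probe-response and beacon template changes. */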
Arik Nemtsove78a2872010-10-16 19:07:21 +02004048static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4049 struct ieee80211_vif *vif,
4050 struct ieee80211_bss_conf *bss_conf,
4051 u32 changed)
4052{
Eliad Peller87fbcb02011-10-05 11:55:41 +02004053 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Eliad Peller536129c2011-10-05 11:55:45 +02004054 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004055 int ret = 0;
4056
Luciano Coelho48af2eb2012-11-20 11:03:32 +02004057 if (changed & BSS_CHANGED_BEACON_INT) {
Arik Nemtsove78a2872010-10-16 19:07:21 +02004058 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4059 bss_conf->beacon_int);
4060
Eliad Peller6a899792011-10-05 11:55:58 +02004061 wlvif->beacon_int = bss_conf->beacon_int;
Arik Nemtsove78a2872010-10-16 19:07:21 +02004062 }
4063
Arik Nemtsov560f002412011-11-08 18:46:54 +02004064 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4065 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
Luciano Coelho62c2e572012-05-10 12:14:04 +03004066
4067 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
Arik Nemtsov560f002412011-11-08 18:46:54 +02004068 }
4069
Luciano Coelho48af2eb2012-11-20 11:03:32 +02004070 if (changed & BSS_CHANGED_BEACON) {
Luciano Coelho62c2e572012-05-10 12:14:04 +03004071 ret = wlcore_set_beacon_template(wl, vif, is_ap);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004072 if (ret < 0)
4073 goto out;
4074 }
4075
4076out:
Arik Nemtsov560f002412011-11-08 18:46:54 +02004077 if (ret != 0)
4078 wl1271_error("beacon info change failed: %d", ret);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004079 return ret;
4080}
4081
4082/* AP mode changes */
4083static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004084 struct ieee80211_vif *vif,
4085 struct ieee80211_bss_conf *bss_conf,
4086 u32 changed)
4087{
Eliad Peller87fbcb02011-10-05 11:55:41 +02004088 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004089 int ret = 0;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004090
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004091 if (changed & BSS_CHANGED_BASIC_RATES) {
Arik Nemtsove78a2872010-10-16 19:07:21 +02004092 u32 rates = bss_conf->basic_rates;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004093
Eliad Peller87fbcb02011-10-05 11:55:41 +02004094 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
Eliad Peller1b92f152011-10-10 10:13:09 +02004095 wlvif->band);
Eliad Pellerd2d66c52011-10-05 11:55:43 +02004096 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
Eliad Peller87fbcb02011-10-05 11:55:41 +02004097 wlvif->basic_rate_set);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004098
Eliad Peller87fbcb02011-10-05 11:55:41 +02004099 ret = wl1271_init_ap_rates(wl, wlvif);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004100 if (ret < 0) {
Arik Nemtsov70f47422011-04-18 14:15:25 +03004101 wl1271_error("AP rate policy change failed %d", ret);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004102 goto out;
Juuso Oikarinene0d8bbf2009-12-11 15:41:04 +02004103 }
Arik Nemtsovc45a85b2011-04-18 14:15:26 +03004104
Eliad Peller784f6942011-10-05 11:55:39 +02004105 ret = wl1271_ap_init_templates(wl, vif);
Arik Nemtsovc45a85b2011-04-18 14:15:26 +03004106 if (ret < 0)
4107 goto out;
Luciano Coelho62c2e572012-05-10 12:14:04 +03004108
4109 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4110 if (ret < 0)
4111 goto out;
4112
4113 ret = wlcore_set_beacon_template(wl, vif, true);
4114 if (ret < 0)
4115 goto out;
Juuso Oikarinene0d8bbf2009-12-11 15:41:04 +02004116 }
4117
Arik Nemtsove78a2872010-10-16 19:07:21 +02004118 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4119 if (ret < 0)
4120 goto out;
4121
Luciano Coelho48af2eb2012-11-20 11:03:32 +02004122 if (changed & BSS_CHANGED_BEACON_ENABLED) {
Arik Nemtsove78a2872010-10-16 19:07:21 +02004123 if (bss_conf->enable_beacon) {
Eliad Peller53d40d02011-10-10 10:13:02 +02004124 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
Eliad Peller87fbcb02011-10-05 11:55:41 +02004125 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004126 if (ret < 0)
4127 goto out;
4128
Eliad Pellera8ab39a2011-10-05 11:55:54 +02004129 ret = wl1271_ap_init_hwenc(wl, wlvif);
Arik Nemtsov7f179b42010-10-16 21:39:06 +02004130 if (ret < 0)
4131 goto out;
Arik Nemtsovcf420392011-08-14 13:17:37 +03004132
Eliad Peller53d40d02011-10-10 10:13:02 +02004133 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
Arik Nemtsovcf420392011-08-14 13:17:37 +03004134 wl1271_debug(DEBUG_AP, "started AP");
Arik Nemtsove78a2872010-10-16 19:07:21 +02004135 }
4136 } else {
Eliad Peller53d40d02011-10-10 10:13:02 +02004137 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
Arik Nemtsov187e52c2013-09-17 18:41:20 +03004138 /*
 4139	 * The AP might be in ROC in case we have just
 4140	 * sent an auth reply. Handle it.
4141 */
4142 if (test_bit(wlvif->role_id, wl->roc_map))
4143 wl12xx_croc(wl, wlvif->role_id);
4144
Eliad Peller0603d892011-10-05 11:55:51 +02004145 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004146 if (ret < 0)
4147 goto out;
4148
Eliad Peller53d40d02011-10-10 10:13:02 +02004149 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
Arik Nemtsov560f002412011-11-08 18:46:54 +02004150 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4151 &wlvif->flags);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004152 wl1271_debug(DEBUG_AP, "stopped AP");
4153 }
4154 }
4155 }
4156
Eliad Peller0603d892011-10-05 11:55:51 +02004157 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004158 if (ret < 0)
4159 goto out;
Arik Nemtsov0b932ab2011-08-14 13:17:27 +03004160
4161 /* Handle HT information change */
4162 if ((changed & BSS_CHANGED_HT) &&
Johannes Berg4bf88532012-11-09 11:39:59 +01004163 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
Eliad Peller0603d892011-10-05 11:55:51 +02004164 ret = wl1271_acx_set_ht_information(wl, wlvif,
Arik Nemtsov0b932ab2011-08-14 13:17:27 +03004165 bss_conf->ht_operation_mode);
4166 if (ret < 0) {
4167 wl1271_warning("Set ht information failed %d", ret);
4168 goto out;
4169 }
4170 }
4171
Arik Nemtsove78a2872010-10-16 19:07:21 +02004172out:
4173 return;
4174}
4175
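/*
 * Apply the firmware state that depends on the new BSSID: beacon interval,
 * rate policies and the (qos) null-data templates.
 */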
Eliad Peller3230f352012-11-20 13:20:01 +02004176static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4177 struct ieee80211_bss_conf *bss_conf,
4178 u32 sta_rate_set)
4179{
4180 u32 rates;
4181 int ret;
4182
4183 wl1271_debug(DEBUG_MAC80211,
4184 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4185 bss_conf->bssid, bss_conf->aid,
4186 bss_conf->beacon_int,
4187 bss_conf->basic_rates, sta_rate_set);
4188
4189 wlvif->beacon_int = bss_conf->beacon_int;
4190 rates = bss_conf->basic_rates;
4191 wlvif->basic_rate_set =
4192 wl1271_tx_enabled_rates_get(wl, rates,
4193 wlvif->band);
4194 wlvif->basic_rate =
4195 wl1271_tx_min_rate_get(wl,
4196 wlvif->basic_rate_set);
4197
4198 if (sta_rate_set)
4199 wlvif->rate_set =
4200 wl1271_tx_enabled_rates_get(wl,
4201 sta_rate_set,
4202 wlvif->band);
4203
4204 /* we only support sched_scan while not connected */
Eliad Peller10199752012-11-22 18:06:23 +02004205 if (wl->sched_vif == wlvif)
Eliad Peller78e28062012-11-22 18:06:15 +02004206 wl->ops->sched_scan_stop(wl, wlvif);
Eliad Peller3230f352012-11-20 13:20:01 +02004207
4208 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4209 if (ret < 0)
4210 return ret;
4211
4212 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4213 if (ret < 0)
4214 return ret;
4215
4216 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4217 if (ret < 0)
4218 return ret;
4219
4220 wlcore_set_ssid(wl, wlvif);
4221
4222 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4223
4224 return 0;
4225}
4226
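/*
 * Undo wlcore_set_bssid(): revert to the band minimum rates and stop the
 * station role if it is still in use.
 */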
4227static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4228{
4229 int ret;
4230
 4231	/* revert to the minimum rates for the current band */
4232 wl1271_set_band_rate(wl, wlvif);
4233 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4234
4235 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4236 if (ret < 0)
4237 return ret;
4238
4239 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4240 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4241 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4242 if (ret < 0)
4243 return ret;
4244 }
4245
4246 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4247 return 0;
4248}
Arik Nemtsove78a2872010-10-16 19:07:21 +02004249/* STA/IBSS mode changes */
4250static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4251 struct ieee80211_vif *vif,
4252 struct ieee80211_bss_conf *bss_conf,
4253 u32 changed)
4254{
Eliad Peller87fbcb02011-10-05 11:55:41 +02004255 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Eliad Peller3230f352012-11-20 13:20:01 +02004256 bool do_join = false;
Eliad Peller536129c2011-10-05 11:55:45 +02004257 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
Eliad Peller227e81e2011-08-14 13:17:26 +03004258 bool ibss_joined = false;
Eliad Peller72c2d9e2011-02-02 09:59:37 +02004259 u32 sta_rate_set = 0;
Arik Nemtsove78a2872010-10-16 19:07:21 +02004260 int ret;
Luciano Coelho2d6e4e762011-01-11 19:07:21 +01004261 struct ieee80211_sta *sta;
Arik Nemtsova1008852011-02-12 23:24:20 +02004262 bool sta_exists = false;
4263 struct ieee80211_sta_ht_cap sta_ht_cap;
Arik Nemtsove78a2872010-10-16 19:07:21 +02004264
4265 if (is_ibss) {
4266 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4267 changed);
4268 if (ret < 0)
4269 goto out;
4270 }
4271
Eliad Peller227e81e2011-08-14 13:17:26 +03004272 if (changed & BSS_CHANGED_IBSS) {
4273 if (bss_conf->ibss_joined) {
Eliad Pellereee514e2011-10-10 10:13:01 +02004274 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
Eliad Peller227e81e2011-08-14 13:17:26 +03004275 ibss_joined = true;
4276 } else {
Eliad Peller3230f352012-11-20 13:20:01 +02004277 wlcore_unset_assoc(wl, wlvif);
4278 wl12xx_cmd_role_stop_sta(wl, wlvif);
Eliad Peller227e81e2011-08-14 13:17:26 +03004279 }
4280 }
4281
4282 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
Arik Nemtsove78a2872010-10-16 19:07:21 +02004283 do_join = true;
4284
4285 /* Need to update the SSID (for filtering etc) */
Eliad Peller227e81e2011-08-14 13:17:26 +03004286 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
Arik Nemtsove78a2872010-10-16 19:07:21 +02004287 do_join = true;
4288
Eliad Peller227e81e2011-08-14 13:17:26 +03004289 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
Juuso Oikarinen5da11dc2010-03-26 12:53:24 +02004290 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4291 bss_conf->enable_beacon ? "enabled" : "disabled");
4292
Juuso Oikarinen5da11dc2010-03-26 12:53:24 +02004293 do_join = true;
4294 }
4295
Arik Nemtsovb0ed8a42013-09-17 18:41:23 +03004296 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4297 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4298
Luciano Coelho48af2eb2012-11-20 11:03:32 +02004299 if (changed & BSS_CHANGED_CQM) {
Juuso Oikarinen00236aed2010-04-09 11:07:30 +03004300 bool enable = false;
4301 if (bss_conf->cqm_rssi_thold)
4302 enable = true;
Eliad Peller0603d892011-10-05 11:55:51 +02004303 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
Juuso Oikarinen00236aed2010-04-09 11:07:30 +03004304 bss_conf->cqm_rssi_thold,
4305 bss_conf->cqm_rssi_hyst);
4306 if (ret < 0)
4307 goto out;
Eliad Peller04324d92011-10-05 11:56:03 +02004308 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
Juuso Oikarinen00236aed2010-04-09 11:07:30 +03004309 }
4310
Eliad Pellerec870112012-11-20 13:20:09 +02004311 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4312 BSS_CHANGED_ASSOC)) {
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03004313 rcu_read_lock();
4314 sta = ieee80211_find_sta(vif, bss_conf->bssid);
Luciano Coelhoef08d022012-11-20 11:03:31 +02004315 if (sta) {
4316 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03004317
Luciano Coelhoef08d022012-11-20 11:03:31 +02004318 /* save the supp_rates of the ap */
4319 sta_rate_set = sta->supp_rates[wlvif->band];
4320 if (sta->ht_cap.ht_supported)
4321 sta_rate_set |=
4322 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4323 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4324 sta_ht_cap = sta->ht_cap;
4325 sta_exists = true;
4326 }
Eliad Peller72c2d9e2011-02-02 09:59:37 +02004327
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03004328 rcu_read_unlock();
Eliad Peller72c2d9e2011-02-02 09:59:37 +02004329 }
Eliad Peller72c2d9e2011-02-02 09:59:37 +02004330
Eliad Peller3230f352012-11-20 13:20:01 +02004331 if (changed & BSS_CHANGED_BSSID) {
4332 if (!is_zero_ether_addr(bss_conf->bssid)) {
4333 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4334 sta_rate_set);
4335 if (ret < 0)
4336 goto out;
4337
4338 /* Need to update the BSSID (for filtering etc) */
Eliad Peller446f5ca2012-03-12 14:53:04 +02004339 do_join = true;
Juuso Oikarinend94cd292009-10-08 21:56:25 +03004340 } else {
Eliad Peller3230f352012-11-20 13:20:01 +02004341 ret = wlcore_clear_bssid(wl, wlvif);
Juuso Oikarinenebba60c2010-04-01 11:38:20 +03004342 if (ret < 0)
Arik Nemtsove78a2872010-10-16 19:07:21 +02004343 goto out;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004344 }
4345 }
Juuso Oikarinen8a5a37a2009-10-08 21:56:24 +03004346
Eliad Pellerd192d262011-05-24 14:33:08 +03004347 if (changed & BSS_CHANGED_IBSS) {
4348 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4349 bss_conf->ibss_joined);
4350
4351 if (bss_conf->ibss_joined) {
4352 u32 rates = bss_conf->basic_rates;
Eliad Peller87fbcb02011-10-05 11:55:41 +02004353 wlvif->basic_rate_set =
Eliad Pelleraf7fbb22011-09-19 13:51:42 +03004354 wl1271_tx_enabled_rates_get(wl, rates,
Eliad Peller1b92f152011-10-10 10:13:09 +02004355 wlvif->band);
Eliad Pellerd2d66c52011-10-05 11:55:43 +02004356 wlvif->basic_rate =
Eliad Peller87fbcb02011-10-05 11:55:41 +02004357 wl1271_tx_min_rate_get(wl,
4358 wlvif->basic_rate_set);
Eliad Pellerd192d262011-05-24 14:33:08 +03004359
Shahar Levi06b660e2011-09-05 13:54:36 +03004360 /* by default, use 11b + OFDM rates */
Eliad Peller30d0c8f2011-10-05 11:55:42 +02004361 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4362 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
Eliad Pellerd192d262011-05-24 14:33:08 +03004363 if (ret < 0)
4364 goto out;
4365 }
4366 }
4367
Eliad Pellerd881fa22014-02-10 13:47:33 +02004368 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4369 /* enable beacon filtering */
4370 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4371 if (ret < 0)
4372 goto out;
4373 }
4374
Eliad Peller0603d892011-10-05 11:55:51 +02004375 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004376 if (ret < 0)
4377 goto out;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004378
Juuso Oikarinen8bf29b02010-02-18 13:25:51 +02004379 if (do_join) {
Eliad Peller3230f352012-11-20 13:20:01 +02004380 ret = wlcore_join(wl, wlvif);
Juuso Oikarinen8bf29b02010-02-18 13:25:51 +02004381 if (ret < 0) {
4382 wl1271_warning("cmd join failed %d", ret);
Arik Nemtsove78a2872010-10-16 19:07:21 +02004383 goto out;
Juuso Oikarinen8bf29b02010-02-18 13:25:51 +02004384 }
Eliad Peller3230f352012-11-20 13:20:01 +02004385 }
Eliad Peller251c1772011-08-14 13:17:17 +03004386
Eliad Peller3230f352012-11-20 13:20:01 +02004387 if (changed & BSS_CHANGED_ASSOC) {
4388 if (bss_conf->assoc) {
Eliad Pellerec870112012-11-20 13:20:09 +02004389 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4390 sta_rate_set);
Eliad Peller251c1772011-08-14 13:17:17 +03004391 if (ret < 0)
4392 goto out;
4393
Eliad Peller9fd6f212012-03-04 10:55:48 +02004394 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4395 wl12xx_set_authorized(wl, wlvif);
Eliad Peller3230f352012-11-20 13:20:01 +02004396 } else {
4397 wlcore_unset_assoc(wl, wlvif);
Eliad Peller251c1772011-08-14 13:17:17 +03004398 }
Juuso Oikarinenc1899552010-03-26 12:53:32 +02004399 }
4400
Eliad Peller518b6802012-11-26 18:05:47 +02004401 if (changed & BSS_CHANGED_PS) {
4402 if ((bss_conf->ps) &&
4403 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4404 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4405 int ps_mode;
4406 char *ps_mode_str;
4407
4408 if (wl->conf.conn.forced_ps) {
4409 ps_mode = STATION_POWER_SAVE_MODE;
4410 ps_mode_str = "forced";
4411 } else {
4412 ps_mode = STATION_AUTO_PS_MODE;
4413 ps_mode_str = "auto";
4414 }
4415
4416 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4417
4418 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
Kalle Valoc6999d82010-02-18 13:25:41 +02004419 if (ret < 0)
Eliad Peller518b6802012-11-26 18:05:47 +02004420 wl1271_warning("enter %s ps failed %d",
4421 ps_mode_str, ret);
4422 } else if (!bss_conf->ps &&
4423 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4424 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4425
4426 ret = wl1271_ps_set_mode(wl, wlvif,
4427 STATION_ACTIVE_MODE);
4428 if (ret < 0)
4429 wl1271_warning("exit auto ps failed %d", ret);
Kalle Valoc6999d82010-02-18 13:25:41 +02004430 }
4431 }
Kalle Valo4695dc92010-03-18 12:26:38 +02004432
4433 /* Handle new association with HT. Do this after join. */
Eliad Peller6f0b1bb2013-05-07 15:41:08 +03004434 if (sta_exists) {
Eliad Peller58321b22012-11-20 13:20:10 +02004435 bool enabled =
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004436 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03004437
Eliad Peller530abe12012-11-28 11:42:31 +02004438 ret = wlcore_hw_set_peer_cap(wl,
4439 &sta_ht_cap,
4440 enabled,
4441 wlvif->rate_set,
4442 wlvif->sta.hlid);
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03004443 if (ret < 0) {
Eliad Peller58321b22012-11-20 13:20:10 +02004444 wl1271_warning("Set ht cap failed %d", ret);
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03004445 goto out;
Eliad Peller58321b22012-11-20 13:20:10 +02004446
4447 }
4448
4449 if (enabled) {
4450 ret = wl1271_acx_set_ht_information(wl, wlvif,
4451 bss_conf->ht_operation_mode);
4452 if (ret < 0) {
4453 wl1271_warning("Set ht information failed %d",
4454 ret);
4455 goto out;
4456 }
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03004457 }
4458 }
4459
Eliad Peller76a74c82012-02-02 12:22:11 +02004460 /* Handle arp filtering. Done after join. */
4461 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4462 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4463 __be32 addr = bss_conf->arp_addr_list[0];
4464 wlvif->sta.qos = bss_conf->qos;
4465 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4466
Johannes Berg0f19b412013-01-14 16:39:07 +01004467 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
Eliad Peller76a74c82012-02-02 12:22:11 +02004468 wlvif->ip_addr = addr;
4469 /*
4470 * The template should have been configured only upon
 4471	 * association. However, it seems that the correct IP
 4472	 * isn't being set (when sending), so we have to
 4473	 * reconfigure the template upon every IP change.
4474 */
4475 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4476 if (ret < 0) {
4477 wl1271_warning("build arp rsp failed: %d", ret);
4478 goto out;
4479 }
4480
4481 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4482 (ACX_ARP_FILTER_ARP_FILTERING |
4483 ACX_ARP_FILTER_AUTO_ARP),
4484 addr);
4485 } else {
4486 wlvif->ip_addr = 0;
4487 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4488 }
4489
4490 if (ret < 0)
4491 goto out;
4492 }
4493
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004494out:
4495 return;
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03004496}
4497
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004498static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4499 struct ieee80211_vif *vif,
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03004500 struct ieee80211_bss_conf *bss_conf,
4501 u32 changed)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004502{
4503 struct wl1271 *wl = hw->priv;
Eliad Peller536129c2011-10-05 11:55:45 +02004504 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4505 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03004506 int ret;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004507
Eliad Pellerd3f5a1b2012-11-19 17:14:05 +02004508 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4509 wlvif->role_id, (int)changed);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004510
Arik Nemtsov6b8bf5b2012-05-15 17:08:54 +03004511 /*
4512 * make sure to cancel pending disconnections if our association
4513 * state changed
4514 */
4515 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
Eliad Pellerc50a2822012-11-22 18:06:19 +02004516 cancel_delayed_work_sync(&wlvif->connection_loss_work);
Arik Nemtsov6b8bf5b2012-05-15 17:08:54 +03004517
Eliad Pellerb515d832012-05-15 17:08:56 +03004518 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4519 !bss_conf->enable_beacon)
4520 wl1271_tx_flush(wl);
4521
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004522 mutex_lock(&wl->mutex);
4523
Ido Yariv4cc53382012-07-24 19:18:49 +03004524 if (unlikely(wl->state != WLCORE_STATE_ON))
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004525 goto out;
4526
Eliad Peller10c8cd02011-10-10 10:13:06 +02004527 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4528 goto out;
4529
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004530 ret = wl1271_ps_elp_wakeup(wl);
4531 if (ret < 0)
4532 goto out;
4533
Alex Galb30d49b2014-01-10 15:21:13 -05004534 if ((changed & BSS_CHANGED_TXPOWER) &&
4535 bss_conf->txpower != wlvif->power_level) {
4536
4537 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4538 if (ret < 0)
4539 goto out;
4540
4541 wlvif->power_level = bss_conf->txpower;
4542 }
4543
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004544 if (is_ap)
4545 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4546 else
4547 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4548
4549 wl1271_ps_elp_sleep(wl);
4550
4551out:
4552 mutex_unlock(&wl->mutex);
4553}
4554
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004555static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4556 struct ieee80211_chanctx_conf *ctx)
4557{
4558 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004559 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4560 cfg80211_get_chandef_type(&ctx->def));
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004561 return 0;
4562}
4563
4564static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4565 struct ieee80211_chanctx_conf *ctx)
4566{
4567 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004568 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4569 cfg80211_get_chandef_type(&ctx->def));
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004570}
4571
4572static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4573 struct ieee80211_chanctx_conf *ctx,
4574 u32 changed)
4575{
4576 wl1271_debug(DEBUG_MAC80211,
4577 "mac80211 change chanctx %d (type %d) changed 0x%x",
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004578 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4579 cfg80211_get_chandef_type(&ctx->def), changed);
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004580}
4581
4582static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4583 struct ieee80211_vif *vif,
4584 struct ieee80211_chanctx_conf *ctx)
4585{
4586 struct wl1271 *wl = hw->priv;
4587 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4588 int channel = ieee80211_frequency_to_channel(
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004589 ctx->def.chan->center_freq);
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004590
4591 wl1271_debug(DEBUG_MAC80211,
4592 "mac80211 assign chanctx (role %d) %d (type %d)",
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004593 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004594
4595 mutex_lock(&wl->mutex);
4596
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004597 wlvif->band = ctx->def.chan->band;
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004598 wlvif->channel = channel;
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004599 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004600
4601 /* update default rates according to the band */
4602 wl1271_set_band_rate(wl, wlvif);
4603
4604 mutex_unlock(&wl->mutex);
4605
4606 return 0;
4607}
4608
4609static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4610 struct ieee80211_vif *vif,
4611 struct ieee80211_chanctx_conf *ctx)
4612{
4613 struct wl1271 *wl = hw->priv;
4614 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4615
4616 wl1271_debug(DEBUG_MAC80211,
4617 "mac80211 unassign chanctx (role %d) %d (type %d)",
4618 wlvif->role_id,
Luciano Coelhoaaabee82012-12-04 16:39:47 +02004619 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4620 cfg80211_get_chandef_type(&ctx->def));
Eliad Pellerb6970ee2012-11-20 13:20:05 +02004621
4622 wl1271_tx_flush(wl);
4623}
4624
Eliad Peller8a3a3c82011-10-02 10:15:52 +02004625static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4626 struct ieee80211_vif *vif, u16 queue,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004627 const struct ieee80211_tx_queue_params *params)
4628{
4629 struct wl1271 *wl = hw->priv;
Eliad Peller0603d892011-10-05 11:55:51 +02004630 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004631 u8 ps_scheme;
4632 int ret = 0;
4633
4634 mutex_lock(&wl->mutex);
4635
4636 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4637
4638 if (params->uapsd)
4639 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4640 else
4641 ps_scheme = CONF_PS_SCHEME_LEGACY;
4642
Eliad Peller5b37ddf2011-12-18 20:25:40 +02004643 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
Eliad Pellerc1b193e2011-03-23 22:22:15 +02004644 goto out;
Arik Nemtsov488fc542010-10-16 20:33:45 +02004645
Eliad Pellerc1b193e2011-03-23 22:22:15 +02004646 ret = wl1271_ps_elp_wakeup(wl);
4647 if (ret < 0)
4648 goto out;
Arik Nemtsov488fc542010-10-16 20:33:45 +02004649
Eliad Pellerc1b193e2011-03-23 22:22:15 +02004650 /*
 4651	 * the txop is configured by mac80211 in units of 32us,
 4652	 * but we need it in us
4653 */
Eliad Peller0603d892011-10-05 11:55:51 +02004654 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
Eliad Pellerc1b193e2011-03-23 22:22:15 +02004655 params->cw_min, params->cw_max,
4656 params->aifs, params->txop << 5);
4657 if (ret < 0)
4658 goto out_sleep;
4659
Eliad Peller0603d892011-10-05 11:55:51 +02004660 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
Eliad Pellerc1b193e2011-03-23 22:22:15 +02004661 CONF_CHANNEL_TYPE_EDCF,
4662 wl1271_tx_get_queue(queue),
4663 ps_scheme, CONF_ACK_POLICY_LEGACY,
4664 0, 0);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004665
4666out_sleep:
Eliad Pellerc1b193e2011-03-23 22:22:15 +02004667 wl1271_ps_elp_sleep(wl);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004668
4669out:
4670 mutex_unlock(&wl->mutex);
4671
4672 return ret;
4673}
4674
Eliad Peller37a41b42011-09-21 14:06:11 +03004675static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4676 struct ieee80211_vif *vif)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004677{
4678
4679 struct wl1271 *wl = hw->priv;
Eliad Peller9c531142012-01-31 11:57:18 +02004680 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004681 u64 mactime = ULLONG_MAX;
4682 int ret;
4683
4684 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4685
4686 mutex_lock(&wl->mutex);
4687
Ido Yariv4cc53382012-07-24 19:18:49 +03004688 if (unlikely(wl->state != WLCORE_STATE_ON))
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004689 goto out;
4690
Ido Yariva6208652011-03-01 15:14:41 +02004691 ret = wl1271_ps_elp_wakeup(wl);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004692 if (ret < 0)
4693 goto out;
4694
Eliad Peller9c531142012-01-31 11:57:18 +02004695 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004696 if (ret < 0)
4697 goto out_sleep;
4698
4699out_sleep:
4700 wl1271_ps_elp_sleep(wl);
4701
4702out:
4703 mutex_unlock(&wl->mutex);
4704 return mactime;
4705}
4706
4707static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4708 struct survey_info *survey)
4709{
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004710 struct ieee80211_conf *conf = &hw->conf;
4711
4712 if (idx != 0)
4713 return -ENOENT;
4714
Karl Beldan675a0b02013-03-25 16:26:57 +01004715 survey->channel = conf->chandef.chan;
Yoni Divinskyadd779a02012-06-13 18:56:54 +03004716 survey->filled = 0;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03004717 return 0;
4718}
4719
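/* Reserve a firmware link (HLID) for a station that joined our AP. */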
Arik Nemtsov409622e2011-02-23 00:22:29 +02004720static int wl1271_allocate_sta(struct wl1271 *wl,
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004721 struct wl12xx_vif *wlvif,
4722 struct ieee80211_sta *sta)
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004723{
4724 struct wl1271_station *wl_sta;
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004725 int ret;
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004726
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004727
Eliad Peller32f0fd52014-02-10 13:47:23 +02004728 if (wl->active_sta_count >= wl->max_ap_stations) {
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004729		wl1271_warning("could not allocate HLID - too many stations");
4730 return -EBUSY;
4731 }
4732
4733 wl_sta = (struct wl1271_station *)sta->drv_priv;
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004734 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4735 if (ret < 0) {
4736 wl1271_warning("could not allocate HLID - too many links");
4737 return -EBUSY;
4738 }
4739
Arik Nemtsov0e752df2013-03-12 17:19:44 +02004740 /* use the previous security seq, if this is a recovery/resume */
4741 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4742
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004743 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
Arik Nemtsovb622d992011-02-23 00:22:31 +02004744 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
Arik Nemtsovda032092011-08-25 12:43:15 +03004745 wl->active_sta_count++;
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004746 return 0;
4747}
4748
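/* Release the firmware link of a departing station. */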
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004749void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004750{
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004751 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
Arik Nemtsovf1acea92011-08-25 12:43:17 +03004752 return;
4753
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004754 clear_bit(hlid, wlvif->ap.sta_hlid_map);
Arik Nemtsovb622d992011-02-23 00:22:31 +02004755 __clear_bit(hlid, &wl->ap_ps_map);
4756 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
Arik Nemtsov0e752df2013-03-12 17:19:44 +02004757
4758 /*
 4759	 * save the last used PN in the private part of ieee80211_sta,
4760 * in case of recovery/suspend
4761 */
Eliad Peller50d26aa2014-07-11 03:01:26 +03004762 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
Arik Nemtsov0e752df2013-03-12 17:19:44 +02004763
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004764 wl12xx_free_link(wl, wlvif, &hlid);
Arik Nemtsovda032092011-08-25 12:43:15 +03004765 wl->active_sta_count--;
Arik Nemtsov55df5af2012-03-03 22:18:00 +02004766
4767 /*
4768 * rearm the tx watchdog when the last STA is freed - give the FW a
4769 * chance to return STA-buffered packets before complaining.
4770 */
4771 if (wl->active_sta_count == 0)
4772 wl12xx_rearm_tx_watchdog_locked(wl);
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004773}
4774
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004775static int wl12xx_sta_add(struct wl1271 *wl,
4776 struct wl12xx_vif *wlvif,
4777 struct ieee80211_sta *sta)
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004778{
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004779 struct wl1271_station *wl_sta;
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004780 int ret = 0;
4781 u8 hlid;
4782
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004783 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4784
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004785 ret = wl1271_allocate_sta(wl, wlvif, sta);
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004786 if (ret < 0)
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004787 return ret;
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004788
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004789 wl_sta = (struct wl1271_station *)sta->drv_priv;
4790 hlid = wl_sta->hlid;
4791
Eliad Peller1b92f152011-10-10 10:13:09 +02004792 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004793 if (ret < 0)
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004794 wl1271_free_sta(wl, wlvif, hlid);
Arik Nemtsov409622e2011-02-23 00:22:29 +02004795
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004796 return ret;
4797}
4798
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004799static int wl12xx_sta_remove(struct wl1271 *wl,
4800 struct wl12xx_vif *wlvif,
4801 struct ieee80211_sta *sta)
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004802{
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004803 struct wl1271_station *wl_sta;
4804 int ret = 0, id;
4805
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004806 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4807
4808 wl_sta = (struct wl1271_station *)sta->drv_priv;
Eliad Pellerc7ffb902011-10-05 11:56:05 +02004809 id = wl_sta->hlid;
4810 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004811 return -EINVAL;
4812
Eliad Peller028e7242014-02-10 13:47:25 +02004813 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004814 if (ret < 0)
4815 return ret;
4816
4817 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4818 return ret;
4819}
4820
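/* Start a ROC on the vif's own role, unless another ROC is already active. */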
Eliad Peller426001a2012-11-26 18:05:45 +02004821static void wlcore_roc_if_possible(struct wl1271 *wl,
4822 struct wl12xx_vif *wlvif)
4823{
4824 if (find_first_bit(wl->roc_map,
4825 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4826 return;
4827
4828 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4829 return;
4830
4831 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4832}
4833
Arik Nemtsov187e52c2013-09-17 18:41:20 +03004834/*
4835 * when wl_sta is NULL, we treat this call as if coming from a
4836 * pending auth reply.
4837 * wl->mutex must be taken and the FW must be awake when the call
4838 * takes place.
4839 */
4840void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4841 struct wl1271_station *wl_sta, bool in_conn)
Eliad Peller426001a2012-11-26 18:05:45 +02004842{
Arik Nemtsov187e52c2013-09-17 18:41:20 +03004843 if (in_conn) {
4844 if (WARN_ON(wl_sta && wl_sta->in_connection))
Eliad Peller426001a2012-11-26 18:05:45 +02004845 return;
Arik Nemtsov187e52c2013-09-17 18:41:20 +03004846
4847 if (!wlvif->ap_pending_auth_reply &&
4848 !wlvif->inconn_count)
Eliad Peller426001a2012-11-26 18:05:45 +02004849 wlcore_roc_if_possible(wl, wlvif);
Arik Nemtsov187e52c2013-09-17 18:41:20 +03004850
4851 if (wl_sta) {
4852 wl_sta->in_connection = true;
4853 wlvif->inconn_count++;
4854 } else {
4855 wlvif->ap_pending_auth_reply = true;
4856 }
Eliad Peller426001a2012-11-26 18:05:45 +02004857 } else {
Arik Nemtsov187e52c2013-09-17 18:41:20 +03004858 if (wl_sta && !wl_sta->in_connection)
Eliad Peller426001a2012-11-26 18:05:45 +02004859 return;
4860
Arik Nemtsov187e52c2013-09-17 18:41:20 +03004861 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
Eliad Peller426001a2012-11-26 18:05:45 +02004862 return;
4863
Arik Nemtsov187e52c2013-09-17 18:41:20 +03004864 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4865 return;
4866
4867 if (wl_sta) {
4868 wl_sta->in_connection = false;
4869 wlvif->inconn_count--;
4870 } else {
4871 wlvif->ap_pending_auth_reply = false;
4872 }
4873
4874 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4875 test_bit(wlvif->role_id, wl->roc_map))
4876 wl12xx_croc(wl, wlvif->role_id);
Eliad Peller426001a2012-11-26 18:05:45 +02004877 }
4878}
4879
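/*
 * Translate mac80211 station state transitions into FW commands: peer
 * add/remove/authorize in AP mode, authorization and sequence-number
 * save/restore in STA mode, and ROC handling around connection setup.
 */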
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004880static int wl12xx_update_sta_state(struct wl1271 *wl,
4881 struct wl12xx_vif *wlvif,
4882 struct ieee80211_sta *sta,
4883 enum ieee80211_sta_state old_state,
4884 enum ieee80211_sta_state new_state)
4885{
4886 struct wl1271_station *wl_sta;
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004887 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4888 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4889 int ret;
4890
4891 wl_sta = (struct wl1271_station *)sta->drv_priv;
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004892
4893 /* Add station (AP mode) */
4894 if (is_ap &&
4895 old_state == IEEE80211_STA_NOTEXIST &&
Eliad Peller29936262012-11-20 13:20:06 +02004896 new_state == IEEE80211_STA_NONE) {
4897 ret = wl12xx_sta_add(wl, wlvif, sta);
4898 if (ret)
4899 return ret;
Eliad Peller426001a2012-11-26 18:05:45 +02004900
4901 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
Eliad Peller29936262012-11-20 13:20:06 +02004902 }
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004903
4904 /* Remove station (AP mode) */
4905 if (is_ap &&
4906 old_state == IEEE80211_STA_NONE &&
4907 new_state == IEEE80211_STA_NOTEXIST) {
4908 /* must not fail */
4909 wl12xx_sta_remove(wl, wlvif, sta);
Eliad Peller426001a2012-11-26 18:05:45 +02004910
4911 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004912 }
4913
4914 /* Authorize station (AP mode) */
4915 if (is_ap &&
4916 new_state == IEEE80211_STA_AUTHORIZED) {
Arik Nemtsov2fec3d22013-03-12 17:19:37 +02004917 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004918 if (ret < 0)
4919 return ret;
4920
4921 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
Arik Nemtsov2fec3d22013-03-12 17:19:37 +02004922 wl_sta->hlid);
Eliad Peller29936262012-11-20 13:20:06 +02004923 if (ret)
4924 return ret;
Eliad Peller426001a2012-11-26 18:05:45 +02004925
4926 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004927 }
4928
Eliad Peller9fd6f212012-03-04 10:55:48 +02004929 /* Authorize station */
4930 if (is_sta &&
4931 new_state == IEEE80211_STA_AUTHORIZED) {
4932 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
Eliad Peller29936262012-11-20 13:20:06 +02004933 ret = wl12xx_set_authorized(wl, wlvif);
4934 if (ret)
4935 return ret;
Eliad Peller9fd6f212012-03-04 10:55:48 +02004936 }
4937
4938 if (is_sta &&
4939 old_state == IEEE80211_STA_AUTHORIZED &&
4940 new_state == IEEE80211_STA_ASSOC) {
4941 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
Eliad Peller3230f352012-11-20 13:20:01 +02004942 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
Eliad Peller9fd6f212012-03-04 10:55:48 +02004943 }
4944
Eliad Peller50d26aa2014-07-11 03:01:26 +03004945 /* save seq number on disassoc (suspend) */
4946 if (is_sta &&
4947 old_state == IEEE80211_STA_ASSOC &&
4948 new_state == IEEE80211_STA_AUTH) {
4949 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
4950 wlvif->total_freed_pkts = 0;
4951 }
4952
4953 /* restore seq number on assoc (resume) */
4954 if (is_sta &&
4955 old_state == IEEE80211_STA_AUTH &&
4956 new_state == IEEE80211_STA_ASSOC) {
4957 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
4958 }
4959
Eliad Peller29936262012-11-20 13:20:06 +02004960 /* clear ROCs on failure or authorization */
4961 if (is_sta &&
4962 (new_state == IEEE80211_STA_AUTHORIZED ||
4963 new_state == IEEE80211_STA_NOTEXIST)) {
4964 if (test_bit(wlvif->role_id, wl->roc_map))
4965 wl12xx_croc(wl, wlvif->role_id);
4966 }
4967
4968 if (is_sta &&
4969 old_state == IEEE80211_STA_NOTEXIST &&
4970 new_state == IEEE80211_STA_NONE) {
4971 if (find_first_bit(wl->roc_map,
4972 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4973 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4974 wl12xx_roc(wl, wlvif, wlvif->role_id,
4975 wlvif->band, wlvif->channel);
4976 }
4977 }
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004978 return 0;
4979}
4980
4981static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4982 struct ieee80211_vif *vif,
4983 struct ieee80211_sta *sta,
4984 enum ieee80211_sta_state old_state,
4985 enum ieee80211_sta_state new_state)
4986{
4987 struct wl1271 *wl = hw->priv;
4988 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4989 int ret;
4990
4991 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4992 sta->aid, old_state, new_state);
4993
4994 mutex_lock(&wl->mutex);
4995
Ido Yariv4cc53382012-07-24 19:18:49 +03004996 if (unlikely(wl->state != WLCORE_STATE_ON)) {
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004997 ret = -EBUSY;
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02004998 goto out;
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02004999 }
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02005000
Ido Yariva6208652011-03-01 15:14:41 +02005001 ret = wl1271_ps_elp_wakeup(wl);
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02005002 if (ret < 0)
5003 goto out;
5004
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02005005 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02005006
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02005007 wl1271_ps_elp_sleep(wl);
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02005008out:
5009 mutex_unlock(&wl->mutex);
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02005010 if (new_state < old_state)
5011 return 0;
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02005012 return ret;
5013}
5014
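/*
 * Handle RX BlockAck session setup/teardown. TX aggregation sessions are
 * managed by the firmware, so all TX actions are rejected here.
 */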
Luciano Coelho4623ec72011-03-21 19:26:41 +02005015static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5016 struct ieee80211_vif *vif,
5017 enum ieee80211_ampdu_mlme_action action,
5018 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5019 u8 buf_size)
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005020{
5021 struct wl1271 *wl = hw->priv;
Eliad Peller536129c2011-10-05 11:55:45 +02005022 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005023 int ret;
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03005024 u8 hlid, *ba_bitmap;
5025
5026 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5027 tid);
5028
5029 /* sanity check - the fields in FW are only 8bits wide */
5030 if (WARN_ON(tid > 0xFF))
5031 return -ENOTSUPP;
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005032
5033 mutex_lock(&wl->mutex);
5034
Ido Yariv4cc53382012-07-24 19:18:49 +03005035 if (unlikely(wl->state != WLCORE_STATE_ON)) {
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005036 ret = -EAGAIN;
5037 goto out;
5038 }
5039
Eliad Peller536129c2011-10-05 11:55:45 +02005040 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
Eliad Peller154da672011-10-05 11:55:53 +02005041 hlid = wlvif->sta.hlid;
Eliad Peller536129c2011-10-05 11:55:45 +02005042 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03005043 struct wl1271_station *wl_sta;
5044
5045 wl_sta = (struct wl1271_station *)sta->drv_priv;
5046 hlid = wl_sta->hlid;
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03005047 } else {
5048 ret = -EINVAL;
5049 goto out;
5050 }
5051
Arik Nemtsov9ae5d8d2012-11-28 11:42:45 +02005052 ba_bitmap = &wl->links[hlid].ba_bitmap;
5053
Ido Yariva6208652011-03-01 15:14:41 +02005054 ret = wl1271_ps_elp_wakeup(wl);
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005055 if (ret < 0)
5056 goto out;
5057
Shahar Levi70559a02011-05-22 16:10:22 +03005058 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5059 tid, action);
5060
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005061 switch (action) {
5062 case IEEE80211_AMPDU_RX_START:
Eliad Pellerd0802ab2011-10-05 11:56:04 +02005063 if (!wlvif->ba_support || !wlvif->ba_allowed) {
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005064 ret = -ENOTSUPP;
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03005065 break;
5066 }
5067
Igal Chernobelskyd21553f2013-03-12 17:19:35 +02005068 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03005069 ret = -EBUSY;
5070 wl1271_error("exceeded max RX BA sessions");
5071 break;
5072 }
5073
5074 if (*ba_bitmap & BIT(tid)) {
5075 ret = -EINVAL;
5076 wl1271_error("cannot enable RX BA session on active "
5077 "tid: %d", tid);
5078 break;
5079 }
5080
5081 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5082 hlid);
5083 if (!ret) {
5084 *ba_bitmap |= BIT(tid);
5085 wl->ba_rx_session_count++;
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005086 }
5087 break;
5088
5089 case IEEE80211_AMPDU_RX_STOP:
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03005090 if (!(*ba_bitmap & BIT(tid))) {
Arik Nemtsovc9549102012-06-06 10:48:56 +03005091 /*
5092 * this happens on reconfig - so only output a debug
5093 * message for now, and don't fail the function.
5094 */
5095 wl1271_debug(DEBUG_MAC80211,
5096 "no active RX BA session on tid: %d",
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03005097 tid);
Arik Nemtsovc9549102012-06-06 10:48:56 +03005098 ret = 0;
Arik Nemtsov0f9c8252011-08-17 10:45:49 +03005099 break;
5100 }
5101
5102 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5103 hlid);
5104 if (!ret) {
5105 *ba_bitmap &= ~BIT(tid);
5106 wl->ba_rx_session_count--;
5107 }
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005108 break;
5109
5110 /*
 5111	 * The BA initiator session is managed by the FW independently.
 5112	 * Fall through here on purpose for all TX AMPDU commands.
5113 */
5114 case IEEE80211_AMPDU_TX_START:
Johannes Berg18b559d2012-07-18 13:51:25 +02005115 case IEEE80211_AMPDU_TX_STOP_CONT:
5116 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5117 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005118 case IEEE80211_AMPDU_TX_OPERATIONAL:
5119 ret = -EINVAL;
5120 break;
5121
5122 default:
5123 wl1271_error("Incorrect ampdu action id=%x\n", action);
5124 ret = -EINVAL;
5125 }
5126
5127 wl1271_ps_elp_sleep(wl);
5128
5129out:
5130 mutex_unlock(&wl->mutex);
5131
5132 return ret;
5133}
5134
Eliad Pelleraf7fbb22011-09-19 13:51:42 +03005135static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5136 struct ieee80211_vif *vif,
5137 const struct cfg80211_bitrate_mask *mask)
5138{
Eliad Peller83587502011-10-10 10:12:53 +02005139 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
Eliad Pelleraf7fbb22011-09-19 13:51:42 +03005140 struct wl1271 *wl = hw->priv;
Eliad Pellerd6fa37c2011-10-11 11:57:39 +02005141 int i, ret = 0;
Eliad Pelleraf7fbb22011-09-19 13:51:42 +03005142
5143 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5144 mask->control[NL80211_BAND_2GHZ].legacy,
5145 mask->control[NL80211_BAND_5GHZ].legacy);
5146
5147 mutex_lock(&wl->mutex);
5148
Arik Nemtsov091185d2012-07-03 09:11:03 +03005149 for (i = 0; i < WLCORE_NUM_BANDS; i++)
Eliad Peller83587502011-10-10 10:12:53 +02005150 wlvif->bitrate_masks[i] =
Eliad Pelleraf7fbb22011-09-19 13:51:42 +03005151 wl1271_tx_enabled_rates_get(wl,
5152 mask->control[i].legacy,
5153 i);
Eliad Pellerd6fa37c2011-10-11 11:57:39 +02005154
Ido Yariv4cc53382012-07-24 19:18:49 +03005155 if (unlikely(wl->state != WLCORE_STATE_ON))
Eliad Pellerd6fa37c2011-10-11 11:57:39 +02005156 goto out;
5157
5158 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5159 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5160
5161 ret = wl1271_ps_elp_wakeup(wl);
5162 if (ret < 0)
5163 goto out;
5164
5165 wl1271_set_band_rate(wl, wlvif);
5166 wlvif->basic_rate =
5167 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5168 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5169
5170 wl1271_ps_elp_sleep(wl);
5171 }
5172out:
Eliad Pelleraf7fbb22011-09-19 13:51:42 +03005173 mutex_unlock(&wl->mutex);
5174
Eliad Pellerd6fa37c2011-10-11 11:57:39 +02005175 return ret;
Eliad Pelleraf7fbb22011-09-19 13:51:42 +03005176}
5177
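/*
 * Trigger a firmware channel switch on every started STA vif and arm a
 * delayed work that reports failure if the switch doesn't complete in time.
 */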
Shahar Levi6d158ff2011-09-08 13:01:33 +03005178static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5179 struct ieee80211_channel_switch *ch_switch)
5180{
5181 struct wl1271 *wl = hw->priv;
Eliad Peller52630c52011-10-10 10:13:08 +02005182 struct wl12xx_vif *wlvif;
Shahar Levi6d158ff2011-09-08 13:01:33 +03005183 int ret;
5184
5185 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5186
Arik Nemtsovb9239b62012-02-28 00:41:33 +02005187 wl1271_tx_flush(wl);
5188
Shahar Levi6d158ff2011-09-08 13:01:33 +03005189 mutex_lock(&wl->mutex);
5190
Ido Yariv4cc53382012-07-24 19:18:49 +03005191 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
Eliad Peller6e8cd332011-10-10 10:13:13 +02005192 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5193 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5194 ieee80211_chswitch_done(vif, false);
5195 }
5196 goto out;
Ido Yariv4cc53382012-07-24 19:18:49 +03005197 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5198 goto out;
Shahar Levi6d158ff2011-09-08 13:01:33 +03005199 }
5200
5201 ret = wl1271_ps_elp_wakeup(wl);
5202 if (ret < 0)
5203 goto out;
5204
Eliad Peller52630c52011-10-10 10:13:08 +02005205 /* TODO: change mac80211 to pass vif as param */
5206 wl12xx_for_each_wlvif_sta(wl, wlvif) {
Eliad Pellerc50a2822012-11-22 18:06:19 +02005207 unsigned long delay_usec;
Shahar Levi6d158ff2011-09-08 13:01:33 +03005208
Eliad Pellerfcab1892012-11-22 18:06:18 +02005209 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
Eliad Pellerc50a2822012-11-22 18:06:19 +02005210 if (ret)
5211 goto out_sleep;
5212
5213 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5214
5215 /* indicate failure 5 seconds after channel switch time */
5216 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5217 ch_switch->count;
5218 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5219 usecs_to_jiffies(delay_usec) +
5220 msecs_to_jiffies(5000));
Eliad Peller52630c52011-10-10 10:13:08 +02005221 }
Shahar Levi6d158ff2011-09-08 13:01:33 +03005222
Eliad Pellerc50a2822012-11-22 18:06:19 +02005223out_sleep:
Shahar Levi6d158ff2011-09-08 13:01:33 +03005224 wl1271_ps_elp_sleep(wl);
5225
5226out:
5227 mutex_unlock(&wl->mutex);
5228}
5229
Emmanuel Grumbach77be2c52014-03-27 11:30:29 +02005230static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5231 u32 queues, bool drop)
Eliad Pellerd8ae5a22012-06-25 13:52:33 +03005232{
5233 struct wl1271 *wl = hw->priv;
5234
5235 wl1271_tx_flush(wl);
5236}
5237
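/*
 * Remain-on-channel: start the device role on the requested channel and
 * schedule roc_complete_work to end the ROC after the requested duration.
 */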
Eliad Pellerdabf37d2012-11-20 13:20:03 +02005238static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5239 struct ieee80211_vif *vif,
5240 struct ieee80211_channel *chan,
Ilan Peerd339d5c2013-02-12 09:34:13 +02005241 int duration,
5242 enum ieee80211_roc_type type)
Eliad Pellerdabf37d2012-11-20 13:20:03 +02005243{
5244 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5245 struct wl1271 *wl = hw->priv;
5246 int channel, ret = 0;
5247
5248 channel = ieee80211_frequency_to_channel(chan->center_freq);
5249
5250 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5251 channel, wlvif->role_id);
5252
5253 mutex_lock(&wl->mutex);
5254
5255 if (unlikely(wl->state != WLCORE_STATE_ON))
5256 goto out;
5257
5258 /* return EBUSY if we can't ROC right now */
5259 if (WARN_ON(wl->roc_vif ||
5260 find_first_bit(wl->roc_map,
5261 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5262 ret = -EBUSY;
5263 goto out;
5264 }
5265
5266 ret = wl1271_ps_elp_wakeup(wl);
5267 if (ret < 0)
5268 goto out;
5269
5270 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5271 if (ret < 0)
5272 goto out_sleep;
5273
5274 wl->roc_vif = vif;
5275 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5276 msecs_to_jiffies(duration));
5277out_sleep:
5278 wl1271_ps_elp_sleep(wl);
5279out:
5280 mutex_unlock(&wl->mutex);
5281 return ret;
5282}
5283
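/*
 * Stop the device role used for the ROC and clear roc_vif. Returns -EBUSY
 * if the vif is no longer initialized.
 */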
5284static int __wlcore_roc_completed(struct wl1271 *wl)
5285{
5286 struct wl12xx_vif *wlvif;
5287 int ret;
5288
5289 /* already completed */
5290 if (unlikely(!wl->roc_vif))
5291 return 0;
5292
5293 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5294
5295 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5296 return -EBUSY;
5297
5298 ret = wl12xx_stop_dev(wl, wlvif);
5299 if (ret < 0)
5300 return ret;
5301
5302 wl->roc_vif = NULL;
5303
5304 return 0;
5305}
5306
5307static int wlcore_roc_completed(struct wl1271 *wl)
5308{
5309 int ret;
5310
5311 wl1271_debug(DEBUG_MAC80211, "roc complete");
5312
5313 mutex_lock(&wl->mutex);
5314
5315 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5316 ret = -EBUSY;
5317 goto out;
5318 }
5319
5320 ret = wl1271_ps_elp_wakeup(wl);
5321 if (ret < 0)
5322 goto out;
5323
5324 ret = __wlcore_roc_completed(wl);
5325
5326 wl1271_ps_elp_sleep(wl);
5327out:
5328 mutex_unlock(&wl->mutex);
5329
5330 return ret;
5331}
5332
5333static void wlcore_roc_complete_work(struct work_struct *work)
5334{
5335 struct delayed_work *dwork;
5336 struct wl1271 *wl;
5337 int ret;
5338
5339 dwork = container_of(work, struct delayed_work, work);
5340 wl = container_of(dwork, struct wl1271, roc_complete_work);
5341
5342 ret = wlcore_roc_completed(wl);
5343 if (!ret)
5344 ieee80211_remain_on_channel_expired(wl->hw);
5345}
5346
5347static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5348{
5349 struct wl1271 *wl = hw->priv;
5350
5351 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5352
5353 /* TODO: per-vif */
5354 wl1271_tx_flush(wl);
5355
5356 /*
5357 * we can't just flush_work here, because it might deadlock
5358 * (as we might get called from the same workqueue)
5359 */
5360 cancel_delayed_work_sync(&wl->roc_complete_work);
5361 wlcore_roc_completed(wl);
5362
5363 return 0;
5364}
5365
Arik Nemtsov5f9b6772012-11-26 18:05:41 +02005366static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5367 struct ieee80211_vif *vif,
5368 struct ieee80211_sta *sta,
5369 u32 changed)
5370{
5371 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5372 struct wl1271 *wl = hw->priv;
5373
5374 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5375}
5376
Nadim Zubidat0a9ffac2013-03-12 17:19:39 +02005377static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5378 struct ieee80211_vif *vif,
5379 struct ieee80211_sta *sta,
5380 s8 *rssi_dbm)
5381{
5382 struct wl1271 *wl = hw->priv;
5383 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5384 int ret = 0;
5385
5386 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5387
5388 mutex_lock(&wl->mutex);
5389
5390 if (unlikely(wl->state != WLCORE_STATE_ON))
5391 goto out;
5392
5393 ret = wl1271_ps_elp_wakeup(wl);
5394 if (ret < 0)
5395 goto out_sleep;
5396
5397 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5398 if (ret < 0)
5399 goto out_sleep;
5400
5401out_sleep:
5402 wl1271_ps_elp_sleep(wl);
5403
5404out:
5405 mutex_unlock(&wl->mutex);
5406
5407 return ret;
5408}
5409
Arik Nemtsov33437892011-04-26 23:35:39 +03005410static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5411{
5412 struct wl1271 *wl = hw->priv;
5413 bool ret = false;
5414
5415 mutex_lock(&wl->mutex);
5416
Ido Yariv4cc53382012-07-24 19:18:49 +03005417 if (unlikely(wl->state != WLCORE_STATE_ON))
Arik Nemtsov33437892011-04-26 23:35:39 +03005418 goto out;
5419
5420 /* packets are considered pending if in the TX queue or the FW */
Arik Nemtsovf1a46382011-07-07 14:25:23 +03005421 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
Arik Nemtsov33437892011-04-26 23:35:39 +03005422out:
5423 mutex_unlock(&wl->mutex);
5424
5425 return ret;
5426}
5427
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005428/* can't be const, mac80211 writes to this */
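/* bitrate values are in units of 100 Kbps, e.g. .bitrate = 10 is 1 Mbps */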
5429static struct ieee80211_rate wl1271_rates[] = {
5430 { .bitrate = 10,
5431 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5432 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5433 { .bitrate = 20,
5434 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5435 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5436 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5437 { .bitrate = 55,
5438 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5439 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5440 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5441 { .bitrate = 110,
5442 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5443 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5444 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5445 { .bitrate = 60,
5446 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5447 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5448 { .bitrate = 90,
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03005449 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5450 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005451 { .bitrate = 120,
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03005452 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5453 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005454 { .bitrate = 180,
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03005455 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5456 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005457 { .bitrate = 240,
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03005458 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5459 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005460 { .bitrate = 360,
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03005461 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5462 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005463 { .bitrate = 480,
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03005464 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5465 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005466 { .bitrate = 540,
Juuso Oikarinen2b60100b2009-10-13 12:47:39 +03005467 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5468 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005469};
5470
Juuso Oikarinenfa97f462010-11-10 11:27:20 +01005471/* can't be const, mac80211 writes to this */
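/* 2.4 GHz band channels (1-14) */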
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005472static struct ieee80211_channel wl1271_channels[] = {
Victor Goldenshtein583f8162012-11-27 08:44:55 +02005473 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5474 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5475 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5476 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5477 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5478 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5479 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5480 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5481 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5482 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5483 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5484 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5485 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5486 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005487};
5488
5489/* can't be const, mac80211 writes to this */
5490static struct ieee80211_supported_band wl1271_band_2ghz = {
5491 .channels = wl1271_channels,
5492 .n_channels = ARRAY_SIZE(wl1271_channels),
5493 .bitrates = wl1271_rates,
5494 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5495};
5496
Teemu Paasikivi1ebec3d2009-10-13 12:47:48 +03005497/* 5 GHz data rates for WL1273 */
5498static struct ieee80211_rate wl1271_rates_5ghz[] = {
5499 { .bitrate = 60,
5500 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5501 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5502 { .bitrate = 90,
5503 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5504 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5505 { .bitrate = 120,
5506 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5507 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5508 { .bitrate = 180,
5509 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5510 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5511 { .bitrate = 240,
5512 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5513 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5514 { .bitrate = 360,
5515 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5516 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5517 { .bitrate = 480,
5518 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5519 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5520 { .bitrate = 540,
5521 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5522 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5523};
5524
Juuso Oikarinenfa97f462010-11-10 11:27:20 +01005525/* 5 GHz band channels for WL1273 */
Teemu Paasikivi1ebec3d2009-10-13 12:47:48 +03005526static struct ieee80211_channel wl1271_channels_5ghz[] = {
Victor Goldenshtein583f8162012-11-27 08:44:55 +02005527 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
Victor Goldenshtein583f8162012-11-27 08:44:55 +02005528 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5529 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5530 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5531 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5532 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5533 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5534 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5535 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5536 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5537 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5538 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5539 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5540 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5541 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5542 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5543 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5544 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5545 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5546 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5547 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5548 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5549 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5550 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5551 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5552 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5553 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5554 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5555 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5556 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5557 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
Teemu Paasikivi1ebec3d2009-10-13 12:47:48 +03005558};
5559
Teemu Paasikivi1ebec3d2009-10-13 12:47:48 +03005560static struct ieee80211_supported_band wl1271_band_5ghz = {
5561 .channels = wl1271_channels_5ghz,
5562 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5563 .bitrates = wl1271_rates_5ghz,
5564 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
Juuso Oikarinenf876bb92010-03-26 12:53:11 +02005565};
5566
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005567static const struct ieee80211_ops wl1271_ops = {
5568 .start = wl1271_op_start,
Ido Yarivc24ec832012-06-26 21:08:58 +03005569 .stop = wlcore_op_stop,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005570 .add_interface = wl1271_op_add_interface,
5571 .remove_interface = wl1271_op_remove_interface,
Eliad Pellerc0fad1b2011-12-19 12:00:03 +02005572 .change_interface = wl12xx_op_change_interface,
Luciano Coelhof634a4e2011-05-18 16:51:26 -04005573#ifdef CONFIG_PM
Eliad Peller402e48612011-05-13 11:57:09 +03005574 .suspend = wl1271_op_suspend,
5575 .resume = wl1271_op_resume,
Luciano Coelhof634a4e2011-05-18 16:51:26 -04005576#endif
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005577 .config = wl1271_op_config,
Juuso Oikarinenc87dec92009-10-08 21:56:31 +03005578 .prepare_multicast = wl1271_op_prepare_multicast,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005579 .configure_filter = wl1271_op_configure_filter,
5580 .tx = wl1271_op_tx,
Arik Nemtsova1c597f2012-05-18 07:46:40 +03005581 .set_key = wlcore_op_set_key,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005582 .hw_scan = wl1271_op_hw_scan,
Eliad Peller73ecce32011-06-27 13:06:45 +03005583 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
Luciano Coelho33c2c062011-05-10 14:46:02 +03005584 .sched_scan_start = wl1271_op_sched_scan_start,
5585 .sched_scan_stop = wl1271_op_sched_scan_stop,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005586 .bss_info_changed = wl1271_op_bss_info_changed,
Arik Nemtsov68d069c2010-11-08 10:51:07 +01005587 .set_frag_threshold = wl1271_op_set_frag_threshold,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005588 .set_rts_threshold = wl1271_op_set_rts_threshold,
Kalle Valoc6999d82010-02-18 13:25:41 +02005589 .conf_tx = wl1271_op_conf_tx,
Juuso Oikarinenbbbb5382010-07-08 17:49:57 +03005590 .get_tsf = wl1271_op_get_tsf,
John W. Linvilleece550d2010-07-28 16:41:06 -04005591 .get_survey = wl1271_op_get_survey,
Eliad Peller2d6cf2b2012-03-04 10:55:47 +02005592 .sta_state = wl12xx_op_sta_state,
Levi, Shaharbbba3e62011-01-23 07:27:23 +01005593 .ampdu_action = wl1271_op_ampdu_action,
Arik Nemtsov33437892011-04-26 23:35:39 +03005594 .tx_frames_pending = wl1271_tx_frames_pending,
Eliad Pelleraf7fbb22011-09-19 13:51:42 +03005595 .set_bitrate_mask = wl12xx_set_bitrate_mask,
Yoni Divinskyba1e6eb2013-05-12 12:35:28 +03005596 .set_default_unicast_key = wl1271_op_set_default_key_idx,
Shahar Levi6d158ff2011-09-08 13:01:33 +03005597 .channel_switch = wl12xx_op_channel_switch,
Eliad Pellerd8ae5a22012-06-25 13:52:33 +03005598 .flush = wlcore_op_flush,
Eliad Pellerdabf37d2012-11-20 13:20:03 +02005599 .remain_on_channel = wlcore_op_remain_on_channel,
5600 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
Eliad Pellerb6970ee2012-11-20 13:20:05 +02005601 .add_chanctx = wlcore_op_add_chanctx,
5602 .remove_chanctx = wlcore_op_remove_chanctx,
5603 .change_chanctx = wlcore_op_change_chanctx,
5604 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5605 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
Arik Nemtsov5f9b6772012-11-26 18:05:41 +02005606 .sta_rc_update = wlcore_op_sta_rc_update,
Nadim Zubidat0a9ffac2013-03-12 17:19:39 +02005607 .get_rssi = wlcore_op_get_rssi,
Kalle Valoc8c90872010-02-18 13:25:53 +02005608 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005609};
5610
Juuso Oikarinenf876bb92010-03-26 12:53:11 +02005611
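/*
 * Translate a rate value reported by the HW into the mac80211 rate index
 * for the given band; illegal or unsupported rates map to index 0.
 */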
Arik Nemtsov43a8bc52011-12-08 00:43:48 +02005612u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
Juuso Oikarinenf876bb92010-03-26 12:53:11 +02005613{
5614 u8 idx;
5615
Arik Nemtsov43a8bc52011-12-08 00:43:48 +02005616 BUG_ON(band >= 2);
Juuso Oikarinenf876bb92010-03-26 12:53:11 +02005617
Arik Nemtsov43a8bc52011-12-08 00:43:48 +02005618 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
Juuso Oikarinenf876bb92010-03-26 12:53:11 +02005619 wl1271_error("Illegal RX rate from HW: %d", rate);
5620 return 0;
5621 }
5622
Arik Nemtsov43a8bc52011-12-08 00:43:48 +02005623 idx = wl->band_rate_to_idx[band][rate];
Juuso Oikarinenf876bb92010-03-26 12:53:11 +02005624 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5625 wl1271_error("Unsupported RX rate from HW: %d", rate);
5626 return 0;
5627 }
5628
5629 return idx;
5630}
5631
Arik Nemtsovf4afbed2012-08-02 20:37:21 +03005632static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
Luciano Coelho5e037e72011-12-23 09:32:17 +02005633{
5634 int i;
5635
Arik Nemtsovf4afbed2012-08-02 20:37:21 +03005636 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5637 oui, nic);
Luciano Coelho5e037e72011-12-23 09:32:17 +02005638
Arik Nemtsovf4afbed2012-08-02 20:37:21 +03005639 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
Luciano Coelho5e037e72011-12-23 09:32:17 +02005640 wl1271_warning("NIC part of the MAC address wraps around!");
5641
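	/* build consecutive addresses by incrementing the NIC part */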
Arik Nemtsovf4afbed2012-08-02 20:37:21 +03005642 for (i = 0; i < wl->num_mac_addr; i++) {
Luciano Coelho5e037e72011-12-23 09:32:17 +02005643 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5644 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5645 wl->addresses[i].addr[2] = (u8) oui;
5646 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5647 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5648 wl->addresses[i].addr[5] = (u8) nic;
5649 nic++;
5650 }
5651
Arik Nemtsovf4afbed2012-08-02 20:37:21 +03005652 /* we may be at most one address short */
5653 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5654
 5655 /*
 5656 * if we are one address short, clone the first address into the
 5657 * last slot and turn on its LAA (locally administered) bit.
 5658 */
5659 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5660 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5661 memcpy(&wl->addresses[idx], &wl->addresses[0],
5662 sizeof(wl->addresses[0]));
5663 /* LAA bit */
5664 wl->addresses[idx].addr[2] |= BIT(1);
5665 }
5666
5667 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
Luciano Coelho5e037e72011-12-23 09:32:17 +02005668 wl->hw->wiphy->addresses = wl->addresses;
5669}
5670
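/*
 * Briefly power the chip on to read the chip ID, the PG version and,
 * where supported, the fused MAC address, then power it off again.
 */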
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005671static int wl12xx_get_hw_info(struct wl1271 *wl)
5672{
5673 int ret;
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005674
5675 ret = wl12xx_set_power_on(wl);
5676 if (ret < 0)
Julia Lawall4fb4e0b2012-10-21 12:52:04 +02005677 return ret;
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005678
Ido Yariv61343232012-06-18 15:50:21 +03005679 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5680 if (ret < 0)
5681 goto out;
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005682
Luciano Coelho00782132011-11-29 13:38:37 +02005683 wl->fuse_oui_addr = 0;
5684 wl->fuse_nic_addr = 0;
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005685
Ido Yariv61343232012-06-18 15:50:21 +03005686 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5687 if (ret < 0)
5688 goto out;
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005689
Luciano Coelho30d9b4a2012-04-11 11:07:28 +03005690 if (wl->ops->get_mac)
Ido Yariv61343232012-06-18 15:50:21 +03005691 ret = wl->ops->get_mac(wl);
Luciano Coelho5e037e72011-12-23 09:32:17 +02005692
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005693out:
Ido Yariv61343232012-06-18 15:50:21 +03005694 wl1271_power_off(wl);
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005695 return ret;
5696}
5697
Felipe Balbi4b32a2c2011-10-06 10:46:20 +03005698static int wl1271_register_hw(struct wl1271 *wl)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005699{
5700 int ret;
Luciano Coelho5e037e72011-12-23 09:32:17 +02005701 u32 oui_addr = 0, nic_addr = 0;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005702
5703 if (wl->mac80211_registered)
5704 return 0;
5705
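	/*
	 * Read the base MAC address (OUI and NIC parts) from the NVS data;
	 * if it is all zeros, the fused address is used as fallback below.
	 */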
Ido Yariv6f8d6b22012-09-02 01:32:47 +03005706 if (wl->nvs_len >= 12) {
Shahar Levibc765bf2011-03-06 16:32:10 +02005707 /* NOTE: The wl->nvs->nvs element must be first; to simplify
 5708 * the casting, we assume it is at the beginning of the
 5709 * wl->nvs structure.
 5710 */
5711 u8 *nvs_ptr = (u8 *)wl->nvs;
Arik Nemtsov31d26ec2010-10-16 21:49:52 +02005712
Luciano Coelho5e037e72011-12-23 09:32:17 +02005713 oui_addr =
5714 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5715 nic_addr =
5716 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
Arik Nemtsov31d26ec2010-10-16 21:49:52 +02005717 }
5718
Luciano Coelho5e037e72011-12-23 09:32:17 +02005719 /* if the MAC address is zeroed in the NVS, derive it from fuse */
5720 if (oui_addr == 0 && nic_addr == 0) {
5721 oui_addr = wl->fuse_oui_addr;
5722 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5723 nic_addr = wl->fuse_nic_addr + 1;
5724 }
5725
Arik Nemtsovf4afbed2012-08-02 20:37:21 +03005726 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005727
5728 ret = ieee80211_register_hw(wl->hw);
5729 if (ret < 0) {
5730 wl1271_error("unable to register mac80211 hw: %d", ret);
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005731 goto out;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005732 }
5733
5734 wl->mac80211_registered = true;
5735
Eliad Pellerd60080a2010-11-24 12:53:16 +02005736 wl1271_debugfs_init(wl);
5737
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005738 wl1271_notice("loaded");
5739
Luciano Coelho30c5dbd2012-01-18 14:53:22 +02005740out:
5741 return ret;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005742}
5743
Felipe Balbi4b32a2c2011-10-06 10:46:20 +03005744static void wl1271_unregister_hw(struct wl1271 *wl)
Teemu Paasikivi3b56dd62010-03-18 12:26:46 +02005745{
Eliad Peller3fcdab72012-02-06 12:47:54 +02005746 if (wl->plt)
Ido Yarivf3df1332012-01-11 09:42:39 +02005747 wl1271_plt_stop(wl);
Juuso Oikarinen4ae3fa82011-01-14 12:48:46 +01005748
Teemu Paasikivi3b56dd62010-03-18 12:26:46 +02005749 ieee80211_unregister_hw(wl->hw);
5750 wl->mac80211_registered = false;
 5752}
Teemu Paasikivi3b56dd62010-03-18 12:26:46 +02005753
Felipe Balbi4b32a2c2011-10-06 10:46:20 +03005754static int wl1271_init_ieee80211(struct wl1271 *wl)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005755{
Victor Goldenshtein583f8162012-11-27 08:44:55 +02005756 int i;
Juuso Oikarinen7a557242010-09-27 12:42:07 +02005757 static const u32 cipher_suites[] = {
5758 WLAN_CIPHER_SUITE_WEP40,
5759 WLAN_CIPHER_SUITE_WEP104,
5760 WLAN_CIPHER_SUITE_TKIP,
5761 WLAN_CIPHER_SUITE_CCMP,
5762 WL1271_CIPHER_SUITE_GEM,
5763 };
5764
Arik Nemtsov2c0133a2012-05-18 07:46:36 +03005765 /* The tx descriptor buffer */
5766 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5767
5768 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5769 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005770
 5771 /* units of beacon interval */
 5772 /* FIXME: find a proper value */
Juuso Oikarinen50c500a2010-04-01 11:38:22 +03005773 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005774
5775 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
Juuso Oikarinen0a343322010-02-22 08:38:41 +02005776 IEEE80211_HW_SUPPORTS_PS |
Eyal Shapiraf1d63a52012-01-31 11:57:21 +02005777 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
Kalle Valo4695dc92010-03-18 12:26:38 +02005778 IEEE80211_HW_SUPPORTS_UAPSD |
Juuso Oikarinena9af0922010-03-26 12:53:30 +02005779 IEEE80211_HW_HAS_RATE_CONTROL |
Juuso Oikarinen00236aed2010-04-09 11:07:30 +03005780 IEEE80211_HW_CONNECTION_MONITOR |
Luciano Coelho25eaea302011-05-02 12:37:33 +03005781 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
Shahar Levifcd23b62011-05-11 12:12:56 +03005782 IEEE80211_HW_SPECTRUM_MGMT |
Arik Nemtsov93f8c8e2011-08-30 09:34:01 +03005783 IEEE80211_HW_AP_LINK_PS |
5784 IEEE80211_HW_AMPDU_AGGREGATION |
Eliad Peller79aba1b2012-02-02 13:15:35 +02005785 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
Arik Nemtsov6b27fe52014-02-10 13:47:34 +02005786 IEEE80211_HW_QUEUE_CONTROL |
5787 IEEE80211_HW_CHANCTX_STA_CSA;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005788
Juuso Oikarinen7a557242010-09-27 12:42:07 +02005789 wl->hw->wiphy->cipher_suites = cipher_suites;
5790 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5791
Juuso Oikarinene0d8bbf2009-12-11 15:41:04 +02005792 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
Eliad Peller045c7452011-08-28 15:23:01 +03005793 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5794 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005795 wl->hw->wiphy->max_scan_ssids = 1;
Luciano Coelho221737d2011-09-02 14:28:22 +03005796 wl->hw->wiphy->max_sched_scan_ssids = 16;
5797 wl->hw->wiphy->max_match_sets = 16;
Guy Eilamea559b42010-12-09 16:54:59 +02005798 /*
5799 * Maximum length of elements in scanning probe request templates
5800 * should be the maximum length possible for a template, without
5801 * the IEEE80211 header of the template
5802 */
Ido Reisc08e3712012-02-02 13:54:27 +02005803 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
Guy Eilamea559b42010-12-09 16:54:59 +02005804 sizeof(struct ieee80211_header);
Luciano Coelhoa8aaaf52011-01-11 18:25:18 +01005805
Ido Reisc08e3712012-02-02 13:54:27 +02005806 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
Luciano Coelhoc9e79a42011-09-27 16:22:35 +03005807 sizeof(struct ieee80211_header);
5808
Eliad Pellerdabf37d2012-11-20 13:20:03 +02005809 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5810
Johannes Berg81ddbb52012-03-26 18:47:18 +02005811 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
Johannes Berg1fb902602013-08-21 11:24:01 +02005812 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5813 WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
Eliad Peller1ec23f72011-08-25 14:26:54 +03005814
Luciano Coelho4a31c112011-03-21 23:16:14 +02005815 /* make sure all our channels fit in the scanned_ch bitmask */
5816 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5817 ARRAY_SIZE(wl1271_channels_5ghz) >
5818 WL1271_MAX_CHANNELS);
Luciano Coelhoa8aaaf52011-01-11 18:25:18 +01005819 /*
Victor Goldenshtein583f8162012-11-27 08:44:55 +02005820 * clear channel flags from the previous usage
5821 * and restore max_power & max_antenna_gain values.
5822 */
5823 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5824 wl1271_band_2ghz.channels[i].flags = 0;
5825 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5826 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5827 }
5828
5829 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5830 wl1271_band_5ghz.channels[i].flags = 0;
5831 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5832 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5833 }
5834
5835 /*
Luciano Coelhoa8aaaf52011-01-11 18:25:18 +01005836 * We keep local copies of the band structs because we need to
5837 * modify them on a per-device basis.
5838 */
5839 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5840 sizeof(wl1271_band_2ghz));
Eliad Pellerbfb92ca2012-05-15 17:09:00 +03005841 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5842 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5843 sizeof(*wl->ht_cap));
Luciano Coelhoa8aaaf52011-01-11 18:25:18 +01005844 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5845 sizeof(wl1271_band_5ghz));
Eliad Pellerbfb92ca2012-05-15 17:09:00 +03005846 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5847 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5848 sizeof(*wl->ht_cap));
Luciano Coelhoa8aaaf52011-01-11 18:25:18 +01005849
5850 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5851 &wl->bands[IEEE80211_BAND_2GHZ];
5852 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5853 &wl->bands[IEEE80211_BAND_5GHZ];
Teemu Paasikivi1ebec3d2009-10-13 12:47:48 +03005854
Arik Nemtsov1c33db72012-11-30 00:48:03 +02005855 /*
 5856 * allow 4 AC queues + 1 cab queue per supported mac address,
 5857 * plus one global off-channel Tx queue
 5858 */
5859 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5860
5861 /* the last queue is the offchannel queue */
5862 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
Juuso Oikarinen31627dc2010-03-26 12:53:12 +02005863 wl->hw->max_rates = 1;
Kalle Valo12bd8942010-03-18 12:26:33 +02005864
Juuso Oikarinenb7417d92010-11-10 11:27:19 +01005865 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5866
Arik Nemtsov9c1b1902011-11-08 18:46:55 +02005867 /* the FW answers probe-requests in AP-mode */
5868 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5869 wl->hw->wiphy->probe_resp_offload =
5870 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5871 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5872 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5873
Eliad Pellerbcab320b2012-06-13 20:29:16 +03005874 /* allowed interface combinations */
Eliad Pellerabf0b242014-02-10 13:47:24 +02005875 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
5876 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
Eliad Pellerbcab320b2012-06-13 20:29:16 +03005877
Felipe Balbia390e852011-10-06 10:07:44 +03005878 SET_IEEE80211_DEV(wl->hw, wl->dev);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005879
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02005880 wl->hw->sta_data_size = sizeof(struct wl1271_station);
Eliad Peller87fbcb02011-10-05 11:55:41 +02005881 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
Arik Nemtsovf84f7d72010-10-16 20:21:23 +02005882
Arik Nemtsovba421f82012-01-06 00:05:51 +02005883 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
Luciano Coelho4c9cfa72011-01-12 14:27:03 +01005884
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005885 return 0;
5886}
5887
Eliad Pellerc50a2822012-11-22 18:06:19 +02005888struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5889 u32 mbox_size)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005890{
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005891 struct ieee80211_hw *hw;
5892 struct wl1271 *wl;
Arik Nemtsova8c0ddb2011-02-23 00:22:26 +02005893 int i, j, ret;
Ido Yariv1f37cbc2010-09-30 13:28:27 +02005894 unsigned int order;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005895
5896 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5897 if (!hw) {
5898 wl1271_error("could not alloc ieee80211_hw");
Juuso Oikarinena1dd8182010-03-18 12:26:31 +02005899 ret = -ENOMEM;
Teemu Paasikivi3b56dd62010-03-18 12:26:46 +02005900 goto err_hw_alloc;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005901 }
5902
5903 wl = hw->priv;
5904 memset(wl, 0, sizeof(*wl));
5905
Arik Nemtsov96e0c682011-12-07 21:09:03 +02005906 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5907 if (!wl->priv) {
5908 wl1271_error("could not alloc wl priv");
5909 ret = -ENOMEM;
5910 goto err_priv_alloc;
5911 }
5912
Eliad Peller87627212011-10-10 10:12:54 +02005913 INIT_LIST_HEAD(&wl->wlvif_list);
Juuso Oikarinen01c09162009-10-13 12:47:55 +03005914
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005915 wl->hw = hw;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005916
Eliad Pellerda08fdf2014-02-10 13:47:22 +02005917 /*
 5918 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
 5919 * We don't allocate any additional resources here, so that's fine.
 5920 */
Juuso Oikarinen6742f552010-12-13 09:52:37 +02005921 for (i = 0; i < NUM_TX_QUEUES; i++)
Eliad Pellerda08fdf2014-02-10 13:47:22 +02005922 for (j = 0; j < WLCORE_MAX_LINKS; j++)
Arik Nemtsova8c0ddb2011-02-23 00:22:26 +02005923 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5924
Ido Yariva6208652011-03-01 15:14:41 +02005925 skb_queue_head_init(&wl->deferred_rx_queue);
5926 skb_queue_head_init(&wl->deferred_tx_queue);
5927
Juuso Oikarinen37b70a82009-10-08 21:56:21 +03005928 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
Ido Yariva6208652011-03-01 15:14:41 +02005929 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
Juuso Oikarinen117b38d2010-09-30 10:43:28 +02005930 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5931 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5932 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
Eliad Pellerdabf37d2012-11-20 13:20:03 +02005933 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
Arik Nemtsov55df5af2012-03-03 22:18:00 +02005934 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
Eliad Peller77ddaa12011-05-15 11:10:29 +03005935
Eliad Peller92ef8962011-06-07 12:50:46 +03005936 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5937 if (!wl->freezable_wq) {
5938 ret = -ENOMEM;
5939 goto err_hw;
5940 }
5941
Luciano Coelho8f6ac532013-05-04 01:06:11 +03005942 wl->channel = 0;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005943 wl->rx_counter = 0;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005944 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
Juuso Oikarinen8a5a37a2009-10-08 21:56:24 +03005945 wl->band = IEEE80211_BAND_2GHZ;
Arik Nemtsov83d08d32012-05-10 12:13:30 +03005946 wl->channel_type = NL80211_CHAN_NO_HT;
Juuso Oikarinen830fb672009-12-11 15:41:06 +02005947 wl->flags = 0;
Juuso Oikarinen7fc3a862010-03-18 12:26:32 +02005948 wl->sg_enabled = true;
Arik Nemtsov66340e52012-06-10 17:09:22 +03005949 wl->sleep_auth = WL1271_PSM_ILLEGAL;
Luciano Coelhoc108c902012-11-26 18:05:49 +02005950 wl->recovery_count = 0;
Juuso Oikarinend717fd62010-05-07 11:38:58 +03005951 wl->hw_pg_ver = -1;
Arik Nemtsovb622d992011-02-23 00:22:31 +02005952 wl->ap_ps_map = 0;
5953 wl->ap_fw_ps_map = 0;
Ido Yariv606ea9f2011-03-01 15:14:39 +02005954 wl->quirks = 0;
Ido Yariv341b7cd2011-03-31 10:07:01 +02005955 wl->platform_quirks = 0;
Eliad Pellerf4df1bd2011-08-14 13:17:15 +03005956 wl->system_hlid = WL12XX_SYSTEM_HLID;
Arik Nemtsovda032092011-08-25 12:43:15 +03005957 wl->active_sta_count = 0;
Arik Nemtsov9a100962012-11-28 11:42:42 +02005958 wl->active_link_count = 0;
Ido Yariv95dac04f2011-06-06 14:57:06 +03005959 wl->fwlog_size = 0;
5960 init_waitqueue_head(&wl->fwlog_waitq);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005961
Eliad Pellerf4df1bd2011-08-14 13:17:15 +03005962 /* The system link is always allocated */
5963 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5964
Ido Yariv25eeb9e2010-10-12 16:20:06 +02005965 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
Arik Nemtsov72b06242011-12-07 21:21:51 +02005966 for (i = 0; i < wl->num_tx_desc; i++)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005967 wl->tx_frames[i] = NULL;
5968
5969 spin_lock_init(&wl->wl_lock);
5970
Ido Yariv4cc53382012-07-24 19:18:49 +03005971 wl->state = WLCORE_STATE_OFF;
Eliad Peller3fcdab72012-02-06 12:47:54 +02005972 wl->fw_type = WL12XX_FW_TYPE_NONE;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005973 mutex_init(&wl->mutex);
Arik Nemtsov2c388492012-05-18 07:46:39 +03005974 mutex_init(&wl->flush_mutex);
Ido Yariv6f8d6b22012-09-02 01:32:47 +03005975 init_completion(&wl->nvs_loading_complete);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +03005976
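	/* the aggregation buffer is allocated in whole pages */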
Igal Chernobelsky26a309c2012-07-29 18:21:12 +03005977 order = get_order(aggr_buf_size);
Ido Yariv1f37cbc2010-09-30 13:28:27 +02005978 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5979 if (!wl->aggr_buf) {
5980 ret = -ENOMEM;
Eliad Peller92ef8962011-06-07 12:50:46 +03005981 goto err_wq;
Ido Yariv1f37cbc2010-09-30 13:28:27 +02005982 }
Igal Chernobelsky26a309c2012-07-29 18:21:12 +03005983 wl->aggr_buf_size = aggr_buf_size;
Ido Yariv1f37cbc2010-09-30 13:28:27 +02005984
Ido Yariv990f5de2011-03-31 10:06:59 +02005985 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5986 if (!wl->dummy_packet) {
5987 ret = -ENOMEM;
5988 goto err_aggr;
5989 }
5990
Ido Yariv95dac04f2011-06-06 14:57:06 +03005991 /* Allocate one page for the FW log */
5992 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5993 if (!wl->fwlog) {
5994 ret = -ENOMEM;
5995 goto err_dummy_packet;
5996 }
5997
Eliad Pellerc50a2822012-11-22 18:06:19 +02005998 wl->mbox_size = mbox_size;
5999 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
Mircea Gherzan690142e2012-03-17 18:41:53 +01006000 if (!wl->mbox) {
6001 ret = -ENOMEM;
6002 goto err_fwlog;
6003 }
6004
Ido Yariv2e07d022012-11-28 11:42:49 +02006005 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6006 if (!wl->buffer_32) {
6007 ret = -ENOMEM;
6008 goto err_mbox;
6009 }
6010
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006011 return hw;
Juuso Oikarinena1dd8182010-03-18 12:26:31 +02006012
Ido Yariv2e07d022012-11-28 11:42:49 +02006013err_mbox:
6014 kfree(wl->mbox);
6015
Mircea Gherzan690142e2012-03-17 18:41:53 +01006016err_fwlog:
6017 free_page((unsigned long)wl->fwlog);
6018
Ido Yariv990f5de2011-03-31 10:06:59 +02006019err_dummy_packet:
6020 dev_kfree_skb(wl->dummy_packet);
6021
Ido Yariv1f37cbc2010-09-30 13:28:27 +02006022err_aggr:
6023 free_pages((unsigned long)wl->aggr_buf, order);
6024
Eliad Peller92ef8962011-06-07 12:50:46 +03006025err_wq:
6026 destroy_workqueue(wl->freezable_wq);
6027
Juuso Oikarinena1dd8182010-03-18 12:26:31 +02006028err_hw:
Teemu Paasikivi3b56dd62010-03-18 12:26:46 +02006029 wl1271_debugfs_exit(wl);
Arik Nemtsov96e0c682011-12-07 21:09:03 +02006030 kfree(wl->priv);
6031
6032err_priv_alloc:
Teemu Paasikivi3b56dd62010-03-18 12:26:46 +02006033 ieee80211_free_hw(hw);
6034
6035err_hw_alloc:
6036
Juuso Oikarinena1dd8182010-03-18 12:26:31 +02006037 return ERR_PTR(ret);
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006038}
Luciano Coelhoffeb5012011-11-21 18:55:51 +02006039EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006040
Luciano Coelhoffeb5012011-11-21 18:55:51 +02006041int wlcore_free_hw(struct wl1271 *wl)
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006042{
Ido Yariv95dac04f2011-06-06 14:57:06 +03006043 /* Unblock any fwlog readers */
6044 mutex_lock(&wl->mutex);
6045 wl->fwlog_size = -1;
6046 wake_up_interruptible_all(&wl->fwlog_waitq);
6047 mutex_unlock(&wl->mutex);
6048
Luciano Coelho33cab572013-05-04 02:46:38 +03006049 wlcore_sysfs_free(wl);
Gery Kahn6f07b722011-07-18 14:21:49 +03006050
Ido Yariv2e07d022012-11-28 11:42:49 +02006051 kfree(wl->buffer_32);
Eliad Pellera8e27822012-11-19 17:14:06 +02006052 kfree(wl->mbox);
Ido Yariv95dac04f2011-06-06 14:57:06 +03006053 free_page((unsigned long)wl->fwlog);
Ido Yariv990f5de2011-03-31 10:06:59 +02006054 dev_kfree_skb(wl->dummy_packet);
Igal Chernobelsky26a309c2012-07-29 18:21:12 +03006055 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006056
6057 wl1271_debugfs_exit(wl);
6058
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006059 vfree(wl->fw);
6060 wl->fw = NULL;
Eliad Peller3fcdab72012-02-06 12:47:54 +02006061 wl->fw_type = WL12XX_FW_TYPE_NONE;
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006062 kfree(wl->nvs);
6063 wl->nvs = NULL;
6064
Eliad Peller75fb4df2014-02-10 13:47:21 +02006065 kfree(wl->raw_fw_status);
6066 kfree(wl->fw_status);
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006067 kfree(wl->tx_res_if);
Eliad Peller92ef8962011-06-07 12:50:46 +03006068 destroy_workqueue(wl->freezable_wq);
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006069
Arik Nemtsov96e0c682011-12-07 21:09:03 +02006070 kfree(wl->priv);
Teemu Paasikivic332a4b2010-02-18 13:25:57 +02006071 ieee80211_free_hw(wl->hw);
6072
6073 return 0;
6074}
Luciano Coelhoffeb5012011-11-21 18:55:51 +02006075EXPORT_SYMBOL_GPL(wlcore_free_hw);
Teemu Paasikivi50b3eb42010-02-22 08:38:26 +02006076
Johannes Berg964dc9e2013-06-03 17:25:34 +02006077#ifdef CONFIG_PM
6078static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6079 .flags = WIPHY_WOWLAN_ANY,
6080 .n_patterns = WL1271_MAX_RX_FILTERS,
6081 .pattern_min_len = 1,
6082 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6083};
6084#endif
6085
Arik Nemtsovf2cede42013-09-17 18:41:30 +03006086static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6087{
6088 return IRQ_WAKE_THREAD;
6089}
6090
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006091static void wlcore_nvs_cb(const struct firmware *fw, void *context)
Felipe Balbice2a2172011-10-05 14:12:55 +03006092{
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006093 struct wl1271 *wl = context;
6094 struct platform_device *pdev = wl->pdev;
Jingoo Han90650622013-09-10 17:57:57 +09006095 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
Luciano Coelhoafb43e62013-01-25 11:57:48 +02006096 struct wl12xx_platform_data *pdata = pdev_data->pdata;
Felipe Balbia390e852011-10-06 10:07:44 +03006097 unsigned long irqflags;
Luciano Coelhoffeb5012011-11-21 18:55:51 +02006098 int ret;
Arik Nemtsovf2cede42013-09-17 18:41:30 +03006099 irq_handler_t hardirq_fn = NULL;
Felipe Balbia390e852011-10-06 10:07:44 +03006100
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006101 if (fw) {
6102 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6103 if (!wl->nvs) {
6104 wl1271_error("Could not allocate nvs data");
6105 goto out;
6106 }
6107 wl->nvs_len = fw->size;
6108 } else {
6109 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6110 WL12XX_NVS_NAME);
6111 wl->nvs = NULL;
6112 wl->nvs_len = 0;
Felipe Balbia390e852011-10-06 10:07:44 +03006113 }
6114
Ido Yariv3992eb22012-09-02 12:29:27 +03006115 ret = wl->ops->setup(wl);
6116 if (ret < 0)
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006117 goto out_free_nvs;
Ido Yariv3992eb22012-09-02 12:29:27 +03006118
Arik Nemtsov72b06242011-12-07 21:21:51 +02006119 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6120
Luciano Coelhoe87288f2011-12-05 16:12:54 +02006121 /* adjust some runtime configuration parameters */
6122 wlcore_adjust_conf(wl);
6123
Felipe Balbia390e852011-10-06 10:07:44 +03006124 wl->irq = platform_get_irq(pdev, 0);
Felipe Balbia390e852011-10-06 10:07:44 +03006125 wl->platform_quirks = pdata->platform_quirks;
Luciano Coelhoafb43e62013-01-25 11:57:48 +02006126 wl->if_ops = pdev_data->if_ops;
Felipe Balbia390e852011-10-06 10:07:44 +03006127
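	/*
	 * Edge-triggered IRQ lines get a minimal hard IRQ handler
	 * (wlcore_hardirq) and no IRQF_ONESHOT, so the line is not kept
	 * masked while the threaded handler runs.
	 */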
Arik Nemtsovf2cede42013-09-17 18:41:30 +03006128 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
Felipe Balbia390e852011-10-06 10:07:44 +03006129 irqflags = IRQF_TRIGGER_RISING;
Arik Nemtsovf2cede42013-09-17 18:41:30 +03006130 hardirq_fn = wlcore_hardirq;
6131 } else {
Felipe Balbia390e852011-10-06 10:07:44 +03006132 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
Arik Nemtsovf2cede42013-09-17 18:41:30 +03006133 }
Felipe Balbia390e852011-10-06 10:07:44 +03006134
Arik Nemtsovf2cede42013-09-17 18:41:30 +03006135 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
Luciano Coelho97236a02013-03-08 09:41:53 +02006136 irqflags, pdev->name, wl);
Felipe Balbia390e852011-10-06 10:07:44 +03006137 if (ret < 0) {
6138 wl1271_error("request_irq() failed: %d", ret);
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006139 goto out_free_nvs;
Felipe Balbia390e852011-10-06 10:07:44 +03006140 }
6141
Johannes Bergdfb89c52012-06-27 09:23:48 +02006142#ifdef CONFIG_PM
Felipe Balbia390e852011-10-06 10:07:44 +03006143 ret = enable_irq_wake(wl->irq);
6144 if (!ret) {
6145 wl->irq_wake_enabled = true;
6146 device_init_wakeup(wl->dev, 1);
Johannes Berg964dc9e2013-06-03 17:25:34 +02006147 if (pdata->pwr_in_suspend)
6148 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
Felipe Balbia390e852011-10-06 10:07:44 +03006149 }
Johannes Bergdfb89c52012-06-27 09:23:48 +02006150#endif
Felipe Balbia390e852011-10-06 10:07:44 +03006151 disable_irq(wl->irq);
6152
Luciano Coelho4afc37a2012-05-10 12:14:02 +03006153 ret = wl12xx_get_hw_info(wl);
6154 if (ret < 0) {
6155 wl1271_error("couldn't get hw info");
Luciano Coelho8b425e62012-06-25 14:41:20 +03006156 goto out_irq;
Luciano Coelho4afc37a2012-05-10 12:14:02 +03006157 }
6158
6159 ret = wl->ops->identify_chip(wl);
6160 if (ret < 0)
Luciano Coelho8b425e62012-06-25 14:41:20 +03006161 goto out_irq;
Luciano Coelho4afc37a2012-05-10 12:14:02 +03006162
Felipe Balbia390e852011-10-06 10:07:44 +03006163 ret = wl1271_init_ieee80211(wl);
6164 if (ret)
6165 goto out_irq;
6166
6167 ret = wl1271_register_hw(wl);
6168 if (ret)
6169 goto out_irq;
6170
Luciano Coelho33cab572013-05-04 02:46:38 +03006171 ret = wlcore_sysfs_init(wl);
6172 if (ret)
Luciano Coelho8b425e62012-06-25 14:41:20 +03006173 goto out_unreg;
Felipe Balbif79f8902011-10-06 13:05:25 +03006174
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006175 wl->initialized = true;
Luciano Coelhoffeb5012011-11-21 18:55:51 +02006176 goto out;
Felipe Balbia390e852011-10-06 10:07:44 +03006177
Luciano Coelho8b425e62012-06-25 14:41:20 +03006178out_unreg:
6179 wl1271_unregister_hw(wl);
6180
Felipe Balbia390e852011-10-06 10:07:44 +03006181out_irq:
6182 free_irq(wl->irq, wl);
6183
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006184out_free_nvs:
6185 kfree(wl->nvs);
6186
Felipe Balbia390e852011-10-06 10:07:44 +03006187out:
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006188 release_firmware(fw);
6189 complete_all(&wl->nvs_loading_complete);
6190}
6191
Bill Pembertonb74324d2012-12-03 09:56:42 -05006192int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006193{
6194 int ret;
6195
6196 if (!wl->ops || !wl->ptable)
6197 return -EINVAL;
6198
6199 wl->dev = &pdev->dev;
6200 wl->pdev = pdev;
6201 platform_set_drvdata(pdev, wl);
6202
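	/*
	 * The NVS file is loaded asynchronously; the rest of the
	 * initialization continues in wlcore_nvs_cb().
	 */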
6203 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6204 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6205 wl, wlcore_nvs_cb);
6206 if (ret < 0) {
6207 wl1271_error("request_firmware_nowait failed: %d", ret);
6208 complete_all(&wl->nvs_loading_complete);
6209 }
6210
Felipe Balbia390e852011-10-06 10:07:44 +03006211 return ret;
Felipe Balbice2a2172011-10-05 14:12:55 +03006212}
Luciano Coelhob2ba99f2011-11-20 23:32:10 +02006213EXPORT_SYMBOL_GPL(wlcore_probe);
Felipe Balbice2a2172011-10-05 14:12:55 +03006214
Bill Pembertonb74324d2012-12-03 09:56:42 -05006215int wlcore_remove(struct platform_device *pdev)
Felipe Balbice2a2172011-10-05 14:12:55 +03006216{
Felipe Balbia390e852011-10-06 10:07:44 +03006217 struct wl1271 *wl = platform_get_drvdata(pdev);
6218
Ido Yariv6f8d6b22012-09-02 01:32:47 +03006219 wait_for_completion(&wl->nvs_loading_complete);
6220 if (!wl->initialized)
6221 return 0;
6222
Felipe Balbia390e852011-10-06 10:07:44 +03006223 if (wl->irq_wake_enabled) {
6224 device_init_wakeup(wl->dev, 0);
6225 disable_irq_wake(wl->irq);
6226 }
6227 wl1271_unregister_hw(wl);
6228 free_irq(wl->irq, wl);
Luciano Coelhoffeb5012011-11-21 18:55:51 +02006229 wlcore_free_hw(wl);
Felipe Balbia390e852011-10-06 10:07:44 +03006230
Felipe Balbice2a2172011-10-05 14:12:55 +03006231 return 0;
6232}
Luciano Coelhob2ba99f2011-11-20 23:32:10 +02006233EXPORT_SYMBOL_GPL(wlcore_remove);
Felipe Balbice2a2172011-10-05 14:12:55 +03006234
Guy Eilam491bbd62011-01-12 10:33:29 +01006235u32 wl12xx_debug_level = DEBUG_NONE;
Eliad Peller17c17552010-12-12 12:15:35 +02006236EXPORT_SYMBOL_GPL(wl12xx_debug_level);
Guy Eilam491bbd62011-01-12 10:33:29 +01006237module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
Eliad Peller17c17552010-12-12 12:15:35 +02006238MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6239
Ido Yariv95dac04f2011-06-06 14:57:06 +03006240module_param_named(fwlog, fwlog_param, charp, 0);
Luciano Coelho2c882fa2012-02-07 12:37:33 +02006241MODULE_PARM_DESC(fwlog,
Ido Yariv95dac04f2011-06-06 14:57:06 +03006242 "FW logger options: continuous, ondemand, dbgpins or disable");
6243
Ido Reis93ac8482013-09-09 12:24:36 +03006244module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
 6245MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks - number of memory blocks to reserve for the FW logger");
6246
Yair Shapira72303412012-11-26 18:05:50 +02006247module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
Eliad Peller2a5bff02011-08-25 18:10:59 +03006248MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6249
Yair Shapira72303412012-11-26 18:05:50 +02006250module_param(no_recovery, int, S_IRUSR | S_IWUSR);
Arik Nemtsov34785be2011-12-08 13:06:45 +02006251MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6252
Teemu Paasikivi50b3eb42010-02-22 08:38:26 +02006253MODULE_LICENSE("GPL");
Luciano Coelhob1a48ca2011-02-22 14:19:28 +02006254MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
Teemu Paasikivi50b3eb42010-02-22 08:38:26 +02006255MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
Tim Gardner0635ad42012-08-29 13:09:33 -06006256MODULE_FIRMWARE(WL12XX_NVS_NAME);