blob: 5e8ab796d5bc06b86fcbdab0f48edb2698074579 [file] [log] [blame]
Johannes Berg8ca151b2013-01-24 14:25:36 +01001/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
Emmanuel Grumbach51368bf2013-12-30 13:15:54 +02008 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
Eran Harary4fb06282015-04-19 10:05:18 +03009 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010010 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
Emmanuel Grumbach410dc5a2013-02-18 09:22:28 +020026 * in the file called COPYING.
Johannes Berg8ca151b2013-01-24 14:25:36 +010027 *
28 * Contact Information:
Emmanuel Grumbachcb2f8272015-11-17 15:39:56 +020029 * Intel Linux Wireless <linuxwifi@intel.com>
Johannes Berg8ca151b2013-01-24 14:25:36 +010030 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
Emmanuel Grumbach51368bf2013-12-30 13:15:54 +020034 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
Eran Harary4fb06282015-04-19 10:05:18 +030035 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
Sara Sharon0db056d2015-12-29 11:07:15 +020036 * Copyright(c) 2016 Intel Deutschland GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010037 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66#include <linux/module.h>
Emmanuel Grumbach1bd3cbc2014-03-18 21:15:06 +020067#include <linux/vmalloc.h>
Johannes Berg8ca151b2013-01-24 14:25:36 +010068#include <net/mac80211.h>
69
70#include "iwl-notif-wait.h"
71#include "iwl-trans.h"
72#include "iwl-op-mode.h"
73#include "iwl-fw.h"
74#include "iwl-debug.h"
75#include "iwl-drv.h"
76#include "iwl-modparams.h"
77#include "mvm.h"
78#include "iwl-phy-db.h"
79#include "iwl-eeprom-parse.h"
80#include "iwl-csr.h"
81#include "iwl-io.h"
82#include "iwl-prph.h"
83#include "rs.h"
84#include "fw-api-scan.h"
85#include "time-event.h"
Golan Ben-Ami2f89a5d2015-10-27 19:17:14 +020086#include "fw-dbg.h"
Sharon Dvir39bdb172015-10-15 18:18:09 +030087#include "fw-api.h"
88#include "fw-api-scan.h"
Johannes Berg8ca151b2013-01-24 14:25:36 +010089
Johannes Berg8ca151b2013-01-24 14:25:36 +010090#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
Johannes Berg8ca151b2013-01-24 14:25:36 +010091MODULE_DESCRIPTION(DRV_DESCRIPTION);
Johannes Berg8ca151b2013-01-24 14:25:36 +010092MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
93MODULE_LICENSE("GPL");
94
95static const struct iwl_op_mode_ops iwl_mvm_ops;
Johannes Berg0316d302015-05-22 13:41:07 +020096static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
Johannes Berg8ca151b2013-01-24 14:25:36 +010097
98struct iwl_mvm_mod_params iwlmvm_mod_params = {
99 .power_scheme = IWL_POWER_SCHEME_BPS,
Emmanuel Grumbachce71c2f2015-01-11 17:19:39 +0200100 .tfd_q_hang_detect = true
Johannes Berg8ca151b2013-01-24 14:25:36 +0100101 /* rest of fields are 0 by default */
102};
103
104module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
105MODULE_PARM_DESC(init_dbg,
106 "set to true to debug an ASSERT in INIT fw (default: false");
107module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
108MODULE_PARM_DESC(power_scheme,
109 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
Emmanuel Grumbachce71c2f2015-01-11 17:19:39 +0200110module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
111 bool, S_IRUGO);
112MODULE_PARM_DESC(tfd_q_hang_detect,
113 "TFD queues hang detection (default: true");
Johannes Berg8ca151b2013-01-24 14:25:36 +0100114
115/*
116 * module init and exit functions
117 */
118static int __init iwl_mvm_init(void)
119{
120 int ret;
121
122 ret = iwl_mvm_rate_control_register();
123 if (ret) {
124 pr_err("Unable to register rate control algorithm: %d\n", ret);
125 return ret;
126 }
127
128 ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
129
130 if (ret) {
131 pr_err("Unable to register MVM op_mode: %d\n", ret);
132 iwl_mvm_rate_control_unregister();
133 }
134
135 return ret;
136}
137module_init(iwl_mvm_init);
138
139static void __exit iwl_mvm_exit(void)
140{
141 iwl_opmode_deregister("iwlmvm");
142 iwl_mvm_rate_control_unregister();
143}
144module_exit(iwl_mvm_exit);
145
146static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
147{
148 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
149 u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
150 u32 reg_val = 0;
Moshe Harela0544272014-12-08 21:13:14 +0200151 u32 phy_config = iwl_mvm_get_phy_config(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100152
Moshe Harela0544272014-12-08 21:13:14 +0200153 radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
154 FW_PHY_CFG_RADIO_TYPE_POS;
155 radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
156 FW_PHY_CFG_RADIO_STEP_POS;
157 radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
158 FW_PHY_CFG_RADIO_DASH_POS;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100159
160 /* SKU control */
161 reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
162 CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
163 reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
164 CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
165
166 /* radio configuration */
167 reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
168 reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
169 reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
170
171 WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
172 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
173
Liad Kaufman9b1fcc12014-05-08 16:30:24 +0300174 /*
175 * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and
176 * shouldn't be set to any non-zero value. The same is supposed to be
177 * true of the other HW, but unsetting them (such as the 7260) causes
178 * automatic tests to fail on seemingly unrelated errors. Need to
179 * further investigate this, but for now we'll separate cases.
180 */
181 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
182 reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100183
Lilach Edelsteine139dc42013-01-13 13:31:10 +0200184 iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
185 CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
186 CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
187 CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
188 CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
189 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
190 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
191 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
192 reg_val);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100193
194 IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
195 radio_cfg_step, radio_cfg_dash);
196
197 /*
198 * W/A : NIC is stuck in a reset state after Early PCIe power off
199 * (PCIe power is lost before PERST# is asserted), causing ME FW
200 * to lose ownership and not being able to obtain it back.
201 */
Avri Altman95411d02015-05-11 11:04:34 +0300202 if (!mvm->trans->cfg->apmg_not_supported)
Eran Harary3073d8c2013-12-29 14:09:59 +0200203 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
204 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
205 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100206}
207
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200208/**
209 * enum iwl_rx_handler_context context for Rx handler
210 * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
211 * which can't acquire mvm->mutex.
212 * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
213 * (and only in this case!), it should be set as ASYNC. In that case,
214 * it will be called from a worker with mvm->mutex held.
215 * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
216 * mutex itself, it will be called from a worker without mvm->mutex held.
217 */
218enum iwl_rx_handler_context {
219 RX_HANDLER_SYNC,
220 RX_HANDLER_ASYNC_LOCKED,
221 RX_HANDLER_ASYNC_UNLOCKED,
222};
223
224/**
225 * struct iwl_rx_handlers handler for FW notification
226 * @cmd_id: command id
227 * @context: see &iwl_rx_handler_context
228 * @fn: the function is called when notification is received
229 */
Johannes Berg8ca151b2013-01-24 14:25:36 +0100230struct iwl_rx_handlers {
Avraham Stern1230b162015-07-09 17:17:03 +0300231 u16 cmd_id;
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200232 enum iwl_rx_handler_context context;
Johannes Berg04168412015-06-23 21:22:09 +0200233 void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100234};
235
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200236#define RX_HANDLER(_cmd_id, _fn, _context) \
237 { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
238#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \
239 { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100240
241/*
242 * Handlers for fw notifications
243 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME
244 * This list should be in order of frequency for performance purposes.
245 *
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200246 * The handler can be one from three contexts, see &iwl_rx_handler_context
Johannes Berg8ca151b2013-01-24 14:25:36 +0100247 */
248static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200249 RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
250 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
Johannes Berg8ca151b2013-01-24 14:25:36 +0100251
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200252 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
253 RX_HANDLER_ASYNC_LOCKED),
254 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
255 RX_HANDLER_ASYNC_LOCKED),
256 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
257 RX_HANDLER_ASYNC_LOCKED),
Emmanuel Grumbachb9fae2d2014-02-17 11:24:10 +0200258 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200259 iwl_mvm_rx_ant_coupling_notif, RX_HANDLER_ASYNC_LOCKED),
Emmanuel Grumbachf421f9c2013-01-17 14:20:29 +0200260
Sara Sharon3af512d62015-07-22 11:38:40 +0300261 RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200262 iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
Sara Sharon3af512d62015-07-22 11:38:40 +0300263
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200264 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
265 RX_HANDLER_SYNC),
266 RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
267 RX_HANDLER_ASYNC_LOCKED),
Emmanuel Grumbach497b49d2013-06-02 20:54:48 +0300268
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200269 RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
Johannes Berg3e56ead2013-02-15 22:23:18 +0100270
Alexander Bondare5d74642014-12-09 19:15:49 +0200271 RX_HANDLER(SCAN_ITERATION_COMPLETE,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200272 iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
David Spinadel35a000b2013-08-28 09:29:43 +0300273 RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200274 iwl_mvm_rx_lmac_scan_complete_notif,
275 RX_HANDLER_ASYNC_LOCKED),
Luciano Coelho6e56f012015-05-06 16:03:39 +0300276 RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200277 RX_HANDLER_SYNC),
David Spinadeld2496222014-05-20 12:46:37 +0300278 RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200279 RX_HANDLER_ASYNC_LOCKED),
Avraham Sternee9219b2015-03-23 15:09:27 +0200280 RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200281 iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
Emmanuel Grumbach497b49d2013-06-02 20:54:48 +0300282
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200283 RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
284 RX_HANDLER_SYNC),
Johannes Berg8ca151b2013-01-24 14:25:36 +0100285
Hila Gonend64048e2013-03-13 18:00:03 +0200286 RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200287 RX_HANDLER_SYNC),
Hila Gonend64048e2013-03-13 18:00:03 +0200288
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200289 RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
Alexander Bondar175a70b2013-04-14 20:59:37 +0300290 RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200291 iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
292 RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
293 RX_HANDLER_ASYNC_LOCKED),
Aviya Erenfeld09eef332015-09-01 19:34:38 +0300294 RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200295 iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
Chaya Rachel Ivgi0a3b7112015-12-16 16:34:55 +0200296 RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200297 iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
Luciano Coelhoea9af242014-11-06 10:34:49 +0200298
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +0300299 RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200300 RX_HANDLER_ASYNC_LOCKED),
301 RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
302 RX_HANDLER_SYNC),
303 RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
304 RX_HANDLER_ASYNC_LOCKED),
Sara Sharon0db056d2015-12-29 11:07:15 +0200305 RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200306 iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
Sara Sharonf92659a2016-02-03 15:04:49 +0200307 RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200308 iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
Johannes Berg8ca151b2013-01-24 14:25:36 +0100309};
310#undef RX_HANDLER
Avraham Stern1230b162015-07-09 17:17:03 +0300311#undef RX_HANDLER_GRP
Johannes Berg8ca151b2013-01-24 14:25:36 +0100312
Sharon Dvir39bdb172015-10-15 18:18:09 +0300313/* Please keep this array *SORTED* by hex value.
314 * Access is done through binary search
315 */
316static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
317 HCMD_NAME(MVM_ALIVE),
318 HCMD_NAME(REPLY_ERROR),
319 HCMD_NAME(ECHO_CMD),
320 HCMD_NAME(INIT_COMPLETE_NOTIF),
321 HCMD_NAME(PHY_CONTEXT_CMD),
322 HCMD_NAME(DBG_CFG),
323 HCMD_NAME(ANTENNA_COUPLING_NOTIFICATION),
324 HCMD_NAME(SCAN_CFG_CMD),
325 HCMD_NAME(SCAN_REQ_UMAC),
326 HCMD_NAME(SCAN_ABORT_UMAC),
327 HCMD_NAME(SCAN_COMPLETE_UMAC),
328 HCMD_NAME(TOF_CMD),
329 HCMD_NAME(TOF_NOTIFICATION),
Sara Sharon3af512d62015-07-22 11:38:40 +0300330 HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
Sharon Dvir39bdb172015-10-15 18:18:09 +0300331 HCMD_NAME(ADD_STA_KEY),
332 HCMD_NAME(ADD_STA),
333 HCMD_NAME(REMOVE_STA),
334 HCMD_NAME(FW_GET_ITEM_CMD),
335 HCMD_NAME(TX_CMD),
336 HCMD_NAME(SCD_QUEUE_CFG),
337 HCMD_NAME(TXPATH_FLUSH),
338 HCMD_NAME(MGMT_MCAST_KEY),
339 HCMD_NAME(WEP_KEY),
340 HCMD_NAME(SHARED_MEM_CFG),
341 HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
342 HCMD_NAME(MAC_CONTEXT_CMD),
343 HCMD_NAME(TIME_EVENT_CMD),
344 HCMD_NAME(TIME_EVENT_NOTIFICATION),
345 HCMD_NAME(BINDING_CONTEXT_CMD),
346 HCMD_NAME(TIME_QUOTA_CMD),
347 HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
348 HCMD_NAME(LQ_CMD),
349 HCMD_NAME(FW_PAGING_BLOCK_CMD),
350 HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
351 HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
352 HCMD_NAME(HOT_SPOT_CMD),
353 HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
354 HCMD_NAME(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD),
355 HCMD_NAME(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD),
356 HCMD_NAME(BT_COEX_UPDATE_SW_BOOST),
357 HCMD_NAME(BT_COEX_UPDATE_CORUN_LUT),
358 HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
359 HCMD_NAME(BT_COEX_CI),
360 HCMD_NAME(PHY_CONFIGURATION_CMD),
361 HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
362 HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
363 HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
364 HCMD_NAME(SCAN_OFFLOAD_CONFIG_CMD),
365 HCMD_NAME(POWER_TABLE_CMD),
366 HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
367 HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
368 HCMD_NAME(DC2DC_CONFIG_CMD),
369 HCMD_NAME(NVM_ACCESS_CMD),
370 HCMD_NAME(SET_CALIB_DEFAULT_CMD),
371 HCMD_NAME(BEACON_NOTIFICATION),
372 HCMD_NAME(BEACON_TEMPLATE_CMD),
373 HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
374 HCMD_NAME(BT_CONFIG),
375 HCMD_NAME(STATISTICS_CMD),
376 HCMD_NAME(STATISTICS_NOTIFICATION),
377 HCMD_NAME(EOSP_NOTIFICATION),
378 HCMD_NAME(REDUCE_TX_POWER_CMD),
379 HCMD_NAME(CARD_STATE_CMD),
380 HCMD_NAME(CARD_STATE_NOTIFICATION),
381 HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
382 HCMD_NAME(TDLS_CONFIG_CMD),
383 HCMD_NAME(MAC_PM_POWER_TABLE),
384 HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
385 HCMD_NAME(MFUART_LOAD_NOTIFICATION),
Sara Sharon43413a92015-12-31 11:49:18 +0200386 HCMD_NAME(RSS_CONFIG_CMD),
Sharon Dvir39bdb172015-10-15 18:18:09 +0300387 HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
388 HCMD_NAME(REPLY_RX_PHY_CMD),
389 HCMD_NAME(REPLY_RX_MPDU_CMD),
390 HCMD_NAME(BA_NOTIF),
391 HCMD_NAME(MCC_UPDATE_CMD),
392 HCMD_NAME(MCC_CHUB_UPDATE_CMD),
393 HCMD_NAME(MARKER_CMD),
394 HCMD_NAME(BT_COEX_PRIO_TABLE),
395 HCMD_NAME(BT_COEX_PROT_ENV),
396 HCMD_NAME(BT_PROFILE_NOTIFICATION),
397 HCMD_NAME(BCAST_FILTER_CMD),
398 HCMD_NAME(MCAST_FILTER_CMD),
399 HCMD_NAME(REPLY_SF_CFG_CMD),
400 HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
401 HCMD_NAME(D3_CONFIG_CMD),
402 HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
403 HCMD_NAME(OFFLOADS_QUERY_CMD),
404 HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
405 HCMD_NAME(MATCH_FOUND_NOTIFICATION),
406 HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER),
407 HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
408 HCMD_NAME(WOWLAN_PATTERNS),
409 HCMD_NAME(WOWLAN_CONFIGURATION),
410 HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
411 HCMD_NAME(WOWLAN_TKIP_PARAM),
412 HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
413 HCMD_NAME(WOWLAN_GET_STATUSES),
414 HCMD_NAME(WOWLAN_TX_POWER_PER_DB),
415 HCMD_NAME(SCAN_ITERATION_COMPLETE),
416 HCMD_NAME(D0I3_END_CMD),
417 HCMD_NAME(LTR_CONFIG),
418 HCMD_NAME(REPLY_DEBUG_CMD),
Johannes Berg8ca151b2013-01-24 14:25:36 +0100419};
Sharon Dvir39bdb172015-10-15 18:18:09 +0300420
421/* Please keep this array *SORTED* by hex value.
422 * Access is done through binary search
423 */
424static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
425 HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
Chaya Rachel Ivgi5c89e7b2016-01-05 10:34:47 +0200426 HCMD_NAME(CTDP_CONFIG_CMD),
Chaya Rachel Ivgic221daf2015-12-29 09:54:49 +0200427 HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
Chaya Rachel Ivgi0a3b7112015-12-16 16:34:55 +0200428 HCMD_NAME(CT_KILL_NOTIFICATION),
Sharon Dvir39bdb172015-10-15 18:18:09 +0300429 HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
430};
431
Sara Sharon0db056d2015-12-29 11:07:15 +0200432/* Please keep this array *SORTED* by hex value.
433 * Access is done through binary search
434 */
Sara Sharone0d8fde2015-12-28 22:37:08 +0200435static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
436 HCMD_NAME(UPDATE_MU_GROUPS_CMD),
Sara Sharon94bb4482015-12-16 18:48:28 +0200437 HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
Sara Sharonf92659a2016-02-03 15:04:49 +0200438 HCMD_NAME(MU_GROUP_MGMT_NOTIF),
Sara Sharon94bb4482015-12-16 18:48:28 +0200439 HCMD_NAME(RX_QUEUES_NOTIFICATION),
Sara Sharone0d8fde2015-12-28 22:37:08 +0200440};
441
442/* Please keep this array *SORTED* by hex value.
443 * Access is done through binary search
444 */
Sara Sharon0db056d2015-12-29 11:07:15 +0200445static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
446 HCMD_NAME(STORED_BEACON_NTF),
447};
448
Sharon Dvir39bdb172015-10-15 18:18:09 +0300449static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
450 [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
451 [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
452 [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
Sara Sharone0d8fde2015-12-28 22:37:08 +0200453 [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
Sara Sharon0db056d2015-12-29 11:07:15 +0200454 [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
Sharon Dvir39bdb172015-10-15 18:18:09 +0300455};
456
Johannes Berg8ca151b2013-01-24 14:25:36 +0100457/* this forward declaration can avoid to export the function */
458static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
Eliad Peller37577fe2013-12-05 17:19:39 +0200459static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100460
Ido Yariv0c0e2c72014-01-16 21:12:02 -0500461static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
462{
463 const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
464
465 if (!pwr_tx_backoff)
466 return 0;
467
468 while (pwr_tx_backoff->pwr) {
469 if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
470 return pwr_tx_backoff->backoff;
471
472 pwr_tx_backoff++;
473 }
474
475 return 0;
476}
477
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +0300478static void iwl_mvm_fw_error_dump_wk(struct work_struct *work);
479
Johannes Berg8ca151b2013-01-24 14:25:36 +0100480static struct iwl_op_mode *
481iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
482 const struct iwl_fw *fw, struct dentry *dbgfs_dir)
483{
484 struct ieee80211_hw *hw;
485 struct iwl_op_mode *op_mode;
486 struct iwl_mvm *mvm;
487 struct iwl_trans_config trans_cfg = {};
488 static const u8 no_reclaim_cmds[] = {
489 TX_CMD,
490 };
491 int err, scan_size;
Ido Yariv0c0e2c72014-01-16 21:12:02 -0500492 u32 min_backoff;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100493
Emmanuel Grumbachc4d83272014-01-14 08:45:26 +0200494 /*
495 * We use IWL_MVM_STATION_COUNT to check the validity of the station
496 * index all over the driver - check that its value corresponds to the
497 * array size.
498 */
499 BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
500
Johannes Berg8ca151b2013-01-24 14:25:36 +0100501 /********************************
502 * 1. Allocating and configuring HW data
503 ********************************/
504 hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
505 sizeof(struct iwl_mvm),
506 &iwl_mvm_hw_ops);
507 if (!hw)
508 return NULL;
509
Oren Givon745160e2014-06-16 10:54:52 +0300510 if (cfg->max_rx_agg_size)
511 hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
512
Gregory Greenman77d96732014-09-02 16:04:58 +0200513 if (cfg->max_tx_agg_size)
514 hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
515
Johannes Berg8ca151b2013-01-24 14:25:36 +0100516 op_mode = hw->priv;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100517
518 mvm = IWL_OP_MODE_GET_MVM(op_mode);
519 mvm->dev = trans->dev;
520 mvm->trans = trans;
521 mvm->cfg = cfg;
522 mvm->fw = fw;
523 mvm->hw = hw;
524
Johannes Berg0316d302015-05-22 13:41:07 +0200525 if (iwl_mvm_has_new_rx_api(mvm)) {
526 op_mode->ops = &iwl_mvm_ops_mq;
Sara Sharon25c2b222016-02-07 13:09:59 +0200527 trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
Johannes Berg0316d302015-05-22 13:41:07 +0200528 } else {
529 op_mode->ops = &iwl_mvm_ops;
Sara Sharon25c2b222016-02-07 13:09:59 +0200530 trans->rx_mpdu_cmd_hdr_size =
531 sizeof(struct iwl_rx_mpdu_res_start);
Johannes Berg0316d302015-05-22 13:41:07 +0200532
533 if (WARN_ON(trans->num_rx_queues > 1))
534 goto out_free;
535 }
536
Eran Harary291aa7c2013-07-03 11:00:06 +0300537 mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
538
Eytan Lifshitz19e737c2013-09-09 13:30:15 +0200539 mvm->aux_queue = 15;
540 mvm->first_agg_queue = 16;
541 mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
542 if (mvm->cfg->base_params->num_of_queues == 16) {
543 mvm->aux_queue = 11;
544 mvm->first_agg_queue = 12;
545 }
Lilach Edelstein1f3b0ff2013-10-06 13:03:32 +0200546 mvm->sf_state = SF_UNINIT;
Emmanuel Grumbach7b358f02014-10-23 14:42:33 +0300547 mvm->cur_ucode = IWL_UCODE_INIT;
Andrei Otcheretianskic89e3332016-01-26 18:12:28 +0200548 mvm->drop_bcn_ap_mode = true;
Eytan Lifshitz19e737c2013-09-09 13:30:15 +0200549
Johannes Berg8ca151b2013-01-24 14:25:36 +0100550 mutex_init(&mvm->mutex);
Eliad Pellerd15a7472014-03-27 18:53:12 +0200551 mutex_init(&mvm->d0i3_suspend_mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100552 spin_lock_init(&mvm->async_handlers_lock);
553 INIT_LIST_HEAD(&mvm->time_event_list);
Ariej Marjiehb1128892014-07-16 21:11:12 +0300554 INIT_LIST_HEAD(&mvm->aux_roc_te_list);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100555 INIT_LIST_HEAD(&mvm->async_handlers_list);
556 spin_lock_init(&mvm->time_event_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300557 spin_lock_init(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100558
559 INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
560 INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
561 INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
Eliad Peller37577fe2013-12-05 17:19:39 +0200562 INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
Emmanuel Grumbachd2709ad2015-01-29 14:58:06 +0200563 INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +0300564 INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100565
Arik Nemtsovb2492502014-03-13 12:21:50 +0200566 spin_lock_init(&mvm->d0i3_tx_lock);
Eliad Peller576eeee2014-07-01 18:38:38 +0300567 spin_lock_init(&mvm->refs_lock);
Arik Nemtsovb2492502014-03-13 12:21:50 +0200568 skb_queue_head_init(&mvm->d0i3_tx);
569 init_waitqueue_head(&mvm->d0i3_exit_waitq);
570
Johannes Berg8ca151b2013-01-24 14:25:36 +0100571 SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
572
573 /*
574 * Populate the state variables that the transport layer needs
575 * to know about.
576 */
577 trans_cfg.op_mode = op_mode;
578 trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
579 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
Emmanuel Grumbach6c4fbcb2015-11-10 11:57:41 +0200580 switch (iwlwifi_mod_params.amsdu_size) {
581 case IWL_AMSDU_4K:
582 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
583 break;
584 case IWL_AMSDU_8K:
585 trans_cfg.rx_buf_size = IWL_AMSDU_8K;
586 break;
587 case IWL_AMSDU_12K:
588 trans_cfg.rx_buf_size = IWL_AMSDU_12K;
589 break;
590 default:
591 pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
592 iwlwifi_mod_params.amsdu_size);
593 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
594 }
Aviya Erenfeldab021652015-06-09 16:45:52 +0300595 trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
596 IWL_UCODE_TLV_API_WIDE_CMD_HDR);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100597
Emmanuel Grumbach25b9ea52013-03-06 11:53:38 +0200598 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100599 trans_cfg.bc_table_dword = true;
600
Sharon Dvir39bdb172015-10-15 18:18:09 +0300601 trans_cfg.command_groups = iwl_mvm_groups;
602 trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100603
604 trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
Johannes Bergb2d81db2014-08-01 20:48:25 +0200605 trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
Emmanuel Grumbach3a736bc2014-09-10 11:16:41 +0300606 trans_cfg.scd_set_active = true;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100607
Liad Kaufmanb4821762014-10-19 16:58:15 +0200608 trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
Emmanuel Grumbach41837ca92015-10-21 09:00:07 +0300609 trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
Liad Kaufmanb4821762014-10-19 16:58:15 +0200610
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +0200611 /* Set a short watchdog for the command queue */
612 trans_cfg.cmd_q_wdg_timeout =
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +0200613 iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +0200614
Johannes Berg8ca151b2013-01-24 14:25:36 +0100615 snprintf(mvm->hw->wiphy->fw_version,
616 sizeof(mvm->hw->wiphy->fw_version),
617 "%s", fw->fw_version);
618
619 /* Configure transport layer */
620 iwl_trans_configure(mvm->trans, &trans_cfg);
621
622 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
Liad Kaufman09e350f2014-11-17 11:41:07 +0200623 trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
624 trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
625 memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
626 sizeof(trans->dbg_conf_tlv));
Emmanuel Grumbachd2709ad2015-01-29 14:58:06 +0200627 trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100628
629 /* set up notification wait support */
630 iwl_notification_wait_init(&mvm->notif_wait);
631
632 /* Init phy db */
633 mvm->phy_db = iwl_phy_db_init(trans);
634 if (!mvm->phy_db) {
635 IWL_ERR(mvm, "Cannot init phy_db\n");
636 goto out_free;
637 }
638
639 IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
640 mvm->cfg->name, mvm->trans->hw_rev);
641
Eran Harary4fb06282015-04-19 10:05:18 +0300642 if (iwlwifi_mod_params.nvm_file)
Eran Hararye02a9d62014-05-07 12:27:10 +0300643 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
Eran Harary4fb06282015-04-19 10:05:18 +0300644 else
645 IWL_DEBUG_EEPROM(mvm->trans->dev,
646 "working without external nvm file\n");
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +0300647
Eran Hararye02a9d62014-05-07 12:27:10 +0300648 if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
Eran Harary14b485f2014-04-23 10:46:09 +0300649 "not allowing power-up and not having nvm_file\n"))
650 goto out_free;
651
Eytan Lifshitz81a67e32013-09-11 12:39:18 +0200652 /*
Eran Harary0ade5792014-08-04 12:21:05 +0300653 * Even if nvm exists in the nvm_file driver should read again the nvm
Eran Harary14b485f2014-04-23 10:46:09 +0300654 * from the nic because there might be entries that exist in the OTP
655 * and not in the file.
656 * for nics with no_power_up_nic_in_init: rely completley on nvm_file
Eytan Lifshitz81a67e32013-09-11 12:39:18 +0200657 */
Eran Hararye02a9d62014-05-07 12:27:10 +0300658 if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
Eran Harary14b485f2014-04-23 10:46:09 +0300659 err = iwl_nvm_init(mvm, false);
Eliad Peller8c678ed2013-12-09 13:15:02 +0200660 if (err)
661 goto out_free;
Eytan Lifshitz81a67e32013-09-11 12:39:18 +0200662 } else {
663 err = iwl_trans_start_hw(mvm->trans);
664 if (err)
665 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100666
Eytan Lifshitz81a67e32013-09-11 12:39:18 +0200667 mutex_lock(&mvm->mutex);
Eliad Peller08f0d232015-12-10 15:47:11 +0200668 iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
Eytan Lifshitz81a67e32013-09-11 12:39:18 +0200669 err = iwl_run_init_mvm_ucode(mvm, true);
Liad Kaufmanb93b1fe2014-10-21 15:01:50 +0200670 if (!err || !iwlmvm_mod_params.init_dbg)
Chaya Rachel Ivgifcb6b922016-02-22 10:21:41 +0200671 iwl_mvm_stop_device(mvm);
Eliad Peller08f0d232015-12-10 15:47:11 +0200672 iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
Eytan Lifshitz81a67e32013-09-11 12:39:18 +0200673 mutex_unlock(&mvm->mutex);
674 /* returns 0 if successful, 1 if success but in rfkill */
675 if (err < 0 && !iwlmvm_mod_params.init_dbg) {
676 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
677 goto out_free;
678 }
Eytan Lifshitz81a67e32013-09-11 12:39:18 +0200679 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100680
David Spinadeld2496222014-05-20 12:46:37 +0300681 scan_size = iwl_mvm_scan_size(mvm);
David Spinadelfb98be52014-05-04 12:51:10 +0300682
Johannes Berg8ca151b2013-01-24 14:25:36 +0100683 mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
684 if (!mvm->scan_cmd)
685 goto out_free;
686
Haim Dreyfuss5a4b2af2015-01-13 11:54:51 +0200687 /* Set EBS as successful as long as not stated otherwise by the FW. */
688 mvm->last_ebs_successful = true;
689
Johannes Berg8ca151b2013-01-24 14:25:36 +0100690 err = iwl_mvm_mac_setup_register(mvm);
691 if (err)
692 goto out_free;
693
Chaya Rachel Ivgi04ddc2a2016-03-03 13:31:39 +0200694 min_backoff = calc_min_backoff(trans, cfg);
695 iwl_mvm_thermal_initialize(mvm, min_backoff);
696
Johannes Berg8ca151b2013-01-24 14:25:36 +0100697 err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
698 if (err)
699 goto out_unregister;
700
Matti Gottlieb3848ab62013-07-30 15:29:37 +0300701 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
702
Luca Coelho33c85ea2016-02-22 15:44:13 +0200703 /* The transport always starts with a taken reference, we can
704 * release it now if d0i3 is supported */
705 if (iwl_mvm_is_d0i3_supported(mvm))
706 iwl_trans_unref(mvm->trans);
Eliad Peller7498cf42014-01-16 17:10:44 +0200707
Gregory Greenmance792912015-06-02 18:06:16 +0300708 iwl_mvm_tof_init(mvm);
709
Sara Sharon43413a92015-12-31 11:49:18 +0200710 /* init RSS hash key */
Sara Sharondd4d3162016-02-07 12:50:35 +0200711 get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
Sara Sharon43413a92015-12-31 11:49:18 +0200712
Johannes Berg8ca151b2013-01-24 14:25:36 +0100713 return op_mode;
714
715 out_unregister:
716 ieee80211_unregister_hw(mvm->hw);
Eliad Peller91b0d112014-01-05 12:41:12 +0200717 iwl_mvm_leds_exit(mvm);
Chaya Rachel Ivgic221daf2015-12-29 09:54:49 +0200718 iwl_mvm_thermal_exit(mvm);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100719 out_free:
Andrei Otcheretianskidbf73d42015-09-30 12:26:23 +0200720 flush_delayed_work(&mvm->fw_dump_wk);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100721 iwl_phy_db_free(mvm->phy_db);
722 kfree(mvm->scan_cmd);
Eran Hararye02a9d62014-05-07 12:27:10 +0300723 if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
Arik Nemtsova4082842013-11-24 19:10:46 +0200724 iwl_trans_op_mode_leave(trans);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100725 ieee80211_free_hw(mvm->hw);
726 return NULL;
727}
728
729static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
730{
731 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
732 int i;
733
Luca Coelhoe27deb42016-03-01 10:30:48 +0200734 /* If d0i3 is supported, we have released the reference that
735 * the transport started with, so we should take it back now
736 * that we are leaving.
737 */
738 if (iwl_mvm_is_d0i3_supported(mvm))
739 iwl_trans_ref(mvm->trans);
740
Johannes Berg8ca151b2013-01-24 14:25:36 +0100741 iwl_mvm_leds_exit(mvm);
742
Chaya Rachel Ivgic221daf2015-12-29 09:54:49 +0200743 iwl_mvm_thermal_exit(mvm);
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +0300744
Johannes Berg8ca151b2013-01-24 14:25:36 +0100745 ieee80211_unregister_hw(mvm->hw);
746
747 kfree(mvm->scan_cmd);
Eliad Pellere59647e2013-11-28 14:08:50 +0200748 kfree(mvm->mcast_filter_cmd);
749 mvm->mcast_filter_cmd = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100750
Johannes Bergafc66bb2013-05-03 11:44:16 +0200751#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
752 kfree(mvm->d3_resume_sram);
753#endif
754
Arik Nemtsova4082842013-11-24 19:10:46 +0200755 iwl_trans_op_mode_leave(mvm->trans);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100756
757 iwl_phy_db_free(mvm->phy_db);
758 mvm->phy_db = NULL;
759
Johannes Berg8ca151b2013-01-24 14:25:36 +0100760 iwl_free_nvm_data(mvm->nvm_data);
Eran Hararyae2b21b2014-01-09 08:08:24 +0200761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100762 kfree(mvm->nvm_sections[i].data);
763
Matti Gottlieb905e36a2016-02-14 17:05:39 +0200764 iwl_free_fw_paging(mvm);
765
Gregory Greenmance792912015-06-02 18:06:16 +0300766 iwl_mvm_tof_clean(mvm);
767
Johannes Berg8ca151b2013-01-24 14:25:36 +0100768 ieee80211_free_hw(mvm->hw);
769}
770
771struct iwl_async_handler_entry {
772 struct list_head list;
773 struct iwl_rx_cmd_buffer rxb;
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200774 enum iwl_rx_handler_context context;
Johannes Berg04168412015-06-23 21:22:09 +0200775 void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100776};
777
778void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
779{
780 struct iwl_async_handler_entry *entry, *tmp;
781
782 spin_lock_bh(&mvm->async_handlers_lock);
783 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
784 iwl_free_rxb(&entry->rxb);
785 list_del(&entry->list);
786 kfree(entry);
787 }
788 spin_unlock_bh(&mvm->async_handlers_lock);
789}
790
791static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
792{
793 struct iwl_mvm *mvm =
794 container_of(wk, struct iwl_mvm, async_handlers_wk);
795 struct iwl_async_handler_entry *entry, *tmp;
796 struct list_head local_list;
797
798 INIT_LIST_HEAD(&local_list);
799
800 /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
Johannes Berg8ca151b2013-01-24 14:25:36 +0100801
802 /*
803 * Sync with Rx path with a lock. Remove all the entries from this list,
804 * add them to a local one (lock free), and then handle them.
805 */
806 spin_lock_bh(&mvm->async_handlers_lock);
807 list_splice_init(&mvm->async_handlers_list, &local_list);
808 spin_unlock_bh(&mvm->async_handlers_lock);
809
810 list_for_each_entry_safe(entry, tmp, &local_list, list) {
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200811 if (entry->context == RX_HANDLER_ASYNC_LOCKED)
812 mutex_lock(&mvm->mutex);
Johannes Berg04168412015-06-23 21:22:09 +0200813 entry->fn(mvm, &entry->rxb);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100814 iwl_free_rxb(&entry->rxb);
815 list_del(&entry->list);
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200816 if (entry->context == RX_HANDLER_ASYNC_LOCKED)
817 mutex_unlock(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100818 kfree(entry);
819 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100820}
821
Emmanuel Grumbach917f39b2015-02-10 10:49:20 +0200822static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
823 struct iwl_rx_packet *pkt)
824{
825 struct iwl_fw_dbg_trigger_tlv *trig;
826 struct iwl_fw_dbg_trigger_cmd *cmds_trig;
Emmanuel Grumbach917f39b2015-02-10 10:49:20 +0200827 int i;
828
829 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
830 return;
831
832 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
833 cmds_trig = (void *)trig->data;
834
835 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
836 return;
837
838 for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
839 /* don't collect on CMD 0 */
840 if (!cmds_trig->cmds[i].cmd_id)
841 break;
842
Sara Sharon0ab66e62015-07-13 14:23:59 +0300843 if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
844 cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
Emmanuel Grumbach917f39b2015-02-10 10:49:20 +0200845 continue;
846
Johannes Berg5d4f9292015-03-31 09:12:54 +0200847 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
Sara Sharon0ab66e62015-07-13 14:23:59 +0300848 "CMD 0x%02x.%02x received",
849 pkt->hdr.group_id, pkt->hdr.cmd);
Emmanuel Grumbach917f39b2015-02-10 10:49:20 +0200850 break;
851 }
852}
853
Johannes Berg0316d302015-05-22 13:41:07 +0200854static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
855 struct iwl_rx_cmd_buffer *rxb,
856 struct iwl_rx_packet *pkt)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100857{
Johannes Berg0316d302015-05-22 13:41:07 +0200858 int i;
Johannes Berg1738d602015-05-22 12:09:44 +0200859
Emmanuel Grumbach917f39b2015-02-10 10:49:20 +0200860 iwl_mvm_rx_check_trigger(mvm, pkt);
861
Johannes Berg8ca151b2013-01-24 14:25:36 +0100862 /*
863 * Do the notification wait before RX handlers so
864 * even if the RX handler consumes the RXB we have
865 * access to it in the notification wait entry.
866 */
867 iwl_notification_wait_notify(&mvm->notif_wait, pkt);
868
869 for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
870 const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
Emmanuel Grumbach36eed562013-02-10 13:25:25 +0200871 struct iwl_async_handler_entry *entry;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100872
Avraham Stern1230b162015-07-09 17:17:03 +0300873 if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
Emmanuel Grumbach36eed562013-02-10 13:25:25 +0200874 continue;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100875
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200876 if (rx_h->context == RX_HANDLER_SYNC) {
Johannes Berg04168412015-06-23 21:22:09 +0200877 rx_h->fn(mvm, rxb);
Johannes Bergf7e64692015-06-23 21:58:17 +0200878 return;
Johannes Berg04168412015-06-23 21:22:09 +0200879 }
Emmanuel Grumbach36eed562013-02-10 13:25:25 +0200880
881 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
882 /* we can't do much... */
883 if (!entry)
Johannes Bergf7e64692015-06-23 21:58:17 +0200884 return;
Emmanuel Grumbach36eed562013-02-10 13:25:25 +0200885
886 entry->rxb._page = rxb_steal_page(rxb);
887 entry->rxb._offset = rxb->_offset;
888 entry->rxb._rx_page_order = rxb->_rx_page_order;
889 entry->fn = rx_h->fn;
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200890 entry->context = rx_h->context;
Emmanuel Grumbach36eed562013-02-10 13:25:25 +0200891 spin_lock(&mvm->async_handlers_lock);
892 list_add_tail(&entry->list, &mvm->async_handlers_list);
893 spin_unlock(&mvm->async_handlers_lock);
894 schedule_work(&mvm->async_handlers_wk);
895 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100896 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100897}
898
Johannes Berg0316d302015-05-22 13:41:07 +0200899static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
900 struct napi_struct *napi,
901 struct iwl_rx_cmd_buffer *rxb)
902{
903 struct iwl_rx_packet *pkt = rxb_addr(rxb);
904 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
905
906 if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
907 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
Sara Sharon585a6fc2015-12-01 13:48:18 +0200908 else if (pkt->hdr.cmd == FRAME_RELEASE)
909 iwl_mvm_rx_frame_release(mvm, rxb, 0);
Johannes Berg0316d302015-05-22 13:41:07 +0200910 else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
911 iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
912 else
913 iwl_mvm_rx_common(mvm, rxb, pkt);
914}
915
916static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
917 struct napi_struct *napi,
918 struct iwl_rx_cmd_buffer *rxb)
919{
920 struct iwl_rx_packet *pkt = rxb_addr(rxb);
921 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
922
923 if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
Johannes Berg780e87c2015-09-03 14:56:10 +0200924 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
Johannes Berg0316d302015-05-22 13:41:07 +0200925 else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
Johannes Berg780e87c2015-09-03 14:56:10 +0200926 iwl_mvm_rx_phy_cmd_mq(mvm, rxb);
Sara Sharon94bb4482015-12-16 18:48:28 +0200927 else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
928 pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
929 iwl_mvm_rx_queue_notif(mvm, rxb, 0);
Johannes Berg0316d302015-05-22 13:41:07 +0200930 else
931 iwl_mvm_rx_common(mvm, rxb, pkt);
932}
933
Liad Kaufmanb4f7a9d2016-02-03 11:05:41 +0200934void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100935{
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300936 int q;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100937
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300938 if (WARN_ON_ONCE(!mq))
Johannes Berg8ca151b2013-01-24 14:25:36 +0100939 return;
940
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300941 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
942 if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
943 IWL_DEBUG_TX_QUEUES(mvm,
Liad Kaufmanb4f7a9d2016-02-03 11:05:41 +0200944 "mac80211 %d already stopped\n", q);
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300945 continue;
946 }
947
948 ieee80211_stop_queue(mvm->hw, q);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100949 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100950}
951
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +0200952static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
953 const struct iwl_device_cmd *cmd)
954{
955 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
956
957 /*
958 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
959 * commands that need to block the Tx queues.
960 */
961 iwl_trans_block_txq_ptrs(mvm->trans, false);
962}
963
Liad Kaufmanb4f7a9d2016-02-03 11:05:41 +0200964static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
Johannes Berg8ca151b2013-01-24 14:25:36 +0100965{
966 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300967 unsigned long mq;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100968
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300969 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmanb4f7a9d2016-02-03 11:05:41 +0200970 mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300971 spin_unlock_bh(&mvm->queue_info_lock);
972
Liad Kaufmanb4f7a9d2016-02-03 11:05:41 +0200973 iwl_mvm_stop_mac_queues(mvm, mq);
974}
975
976void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
977{
978 int q;
979
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300980 if (WARN_ON_ONCE(!mq))
Johannes Berg8ca151b2013-01-24 14:25:36 +0100981 return;
982
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300983 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
984 if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
985 IWL_DEBUG_TX_QUEUES(mvm,
Liad Kaufmanb4f7a9d2016-02-03 11:05:41 +0200986 "mac80211 %d still stopped\n", q);
Liad Kaufman4ecafae2015-07-14 13:36:18 +0300987 continue;
988 }
989
990 ieee80211_wake_queue(mvm->hw, q);
Johannes Berg8ca151b2013-01-24 14:25:36 +0100991 }
Johannes Berg8ca151b2013-01-24 14:25:36 +0100992}
993
Liad Kaufmanb4f7a9d2016-02-03 11:05:41 +0200994static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
995{
996 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
997 unsigned long mq;
998
999 spin_lock_bh(&mvm->queue_info_lock);
1000 mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
1001 spin_unlock_bh(&mvm->queue_info_lock);
1002
1003 iwl_mvm_start_mac_queues(mvm, mq);
1004}
1005
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001006void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
1007{
1008 if (state)
1009 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1010 else
1011 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1012
1013 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
1014}
1015
Johannes Berg14cfca72014-02-25 20:50:53 +01001016static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001017{
1018 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
Emmanuel Grumbach31b8b342014-11-02 15:48:09 +02001019 bool calibrating = ACCESS_ONCE(mvm->calibrating);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001020
1021 if (state)
1022 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1023 else
1024 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1025
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001026 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
Johannes Berg14cfca72014-02-25 20:50:53 +01001027
Emmanuel Grumbach31b8b342014-11-02 15:48:09 +02001028 /* iwl_run_init_mvm_ucode is waiting for results, abort it */
1029 if (calibrating)
1030 iwl_abort_notification_waits(&mvm->notif_wait);
1031
1032 /*
1033 * Stop the device if we run OPERATIONAL firmware or if we are in the
1034 * middle of the calibrations.
1035 */
1036 return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001037}
1038
1039static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
1040{
1041 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1042 struct ieee80211_tx_info *info;
1043
1044 info = IEEE80211_SKB_CB(skb);
1045 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1046 ieee80211_free_txskb(mvm->hw, skb);
1047}
1048
Johannes Bergac1ed412013-07-04 15:25:25 +02001049struct iwl_mvm_reprobe {
1050 struct device *dev;
1051 struct work_struct work;
1052};
1053
1054static void iwl_mvm_reprobe_wk(struct work_struct *wk)
1055{
1056 struct iwl_mvm_reprobe *reprobe;
1057
1058 reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
1059 if (device_reprobe(reprobe->dev))
1060 dev_err(reprobe->dev, "reprobe failed!\n");
1061 kfree(reprobe);
1062 module_put(THIS_MODULE);
1063}
1064
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +03001065static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
1066{
1067 struct iwl_mvm *mvm =
Emmanuel Grumbachd2709ad2015-01-29 14:58:06 +02001068 container_of(work, struct iwl_mvm, fw_dump_wk.work);
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +03001069
Liad Kaufmanfb2380a2015-01-01 17:42:46 +02001070 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
1071 return;
1072
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +03001073 mutex_lock(&mvm->mutex);
Emmanuel Grumbach145d90b2015-04-13 12:05:48 +03001074
1075 /* stop recording */
1076 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1077 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
1078 } else {
1079 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
1080 /* wait before we collect the data till the DBGC stop */
1081 udelay(100);
1082 }
1083
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +03001084 iwl_mvm_fw_error_dump(mvm);
Emmanuel Grumbache66e0b72014-12-29 09:42:37 +02001085
1086 /* start recording again if the firmware is not crashed */
1087 WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
Emmanuel Grumbach9beda942015-03-02 14:39:03 +02001088 mvm->fw->dbg_dest_tlv &&
1089 iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
Emmanuel Grumbache66e0b72014-12-29 09:42:37 +02001090
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +03001091 mutex_unlock(&mvm->mutex);
Liad Kaufmanfb2380a2015-01-01 17:42:46 +02001092
1093 iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
Emmanuel Grumbach4bfa47f2014-09-11 16:19:43 +03001094}
1095
Luciano Coelhob08c1d92014-05-20 23:31:05 +03001096void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001097{
Johannes Berg8ca151b2013-01-24 14:25:36 +01001098 iwl_abort_notification_waits(&mvm->notif_wait);
1099
1100 /*
David Spinadel992f81f2014-01-09 14:22:55 +02001101 * This is a bit racy, but worst case we tell mac80211 about
1102 * a stopped/aborted scan when that was already done which
1103 * is not a problem. It is necessary to abort any os scan
1104 * here because mac80211 requires having the scan cleared
1105 * before restarting.
1106 * We'll reset the scan_status to NONE in restart cleanup in
1107 * the next start() call from mac80211. If restart isn't called
1108 * (no fw restart) scan status will stay busy.
1109 */
David Spinadel4ffb3652015-03-10 10:06:02 +02001110 iwl_mvm_report_scan_aborted(mvm);
David Spinadel992f81f2014-01-09 14:22:55 +02001111
1112 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001113 * If we're restarting already, don't cycle restarts.
1114 * If INIT fw asserted, it will likely fail again.
1115 * If WoWLAN fw asserted, don't restart either, mac80211
1116 * can't recover this since we're already half suspended.
1117 */
Luciano Coelho60f10712015-01-28 09:00:27 +02001118 if (!mvm->restart_fw && fw_error) {
Oren Givon36fb9012015-07-15 15:47:28 +03001119 iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
1120 NULL);
Luciano Coelho60f10712015-01-28 09:00:27 +02001121 } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
1122 &mvm->status)) {
Johannes Bergac1ed412013-07-04 15:25:25 +02001123 struct iwl_mvm_reprobe *reprobe;
1124
1125 IWL_ERR(mvm,
1126 "Firmware error during reconfiguration - reprobe!\n");
1127
1128 /*
1129 * get a module reference to avoid doing this while unloading
1130 * anyway and to avoid scheduling a work with code that's
1131 * being removed.
1132 */
1133 if (!try_module_get(THIS_MODULE)) {
1134 IWL_ERR(mvm, "Module is being unloaded - abort\n");
1135 return;
1136 }
1137
1138 reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
1139 if (!reprobe) {
1140 module_put(THIS_MODULE);
1141 return;
1142 }
1143 reprobe->dev = mvm->trans->dev;
1144 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
1145 schedule_work(&reprobe->work);
Luciano Coelho60f10712015-01-28 09:00:27 +02001146 } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
Eliad Peller7498cf42014-01-16 17:10:44 +02001147 /* don't let the transport/FW power down */
1148 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1149
Luciano Coelhob08c1d92014-05-20 23:31:05 +03001150 if (fw_error && mvm->restart_fw > 0)
Eran Harary291aa7c2013-07-03 11:00:06 +03001151 mvm->restart_fw--;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001152 ieee80211_restart_hw(mvm->hw);
1153 }
1154}
1155
Emmanuel Grumbach715c9982013-02-28 08:57:31 +02001156static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
1157{
1158 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1159
1160 iwl_mvm_dump_nic_error_log(mvm);
Emmanuel Grumbach1bd3cbc2014-03-18 21:15:06 +02001161
Luciano Coelhob08c1d92014-05-20 23:31:05 +03001162 iwl_mvm_nic_restart(mvm, true);
Emmanuel Grumbach715c9982013-02-28 08:57:31 +02001163}
1164
Johannes Berg8ca151b2013-01-24 14:25:36 +01001165static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
1166{
Emmanuel Grumbach715c9982013-02-28 08:57:31 +02001167 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1168
Johannes Berg8ca151b2013-01-24 14:25:36 +01001169 WARN_ON(1);
Luciano Coelhob08c1d92014-05-20 23:31:05 +03001170 iwl_mvm_nic_restart(mvm, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001171}
1172
Eliad Peller37577fe2013-12-05 17:19:39 +02001173struct iwl_d0i3_iter_data {
1174 struct iwl_mvm *mvm;
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001175 struct ieee80211_vif *connected_vif;
Eliad Peller37577fe2013-12-05 17:19:39 +02001176 u8 ap_sta_id;
1177 u8 vif_count;
Arik Nemtsovb2492502014-03-13 12:21:50 +02001178 u8 offloading_tid;
1179 bool disable_offloading;
Eliad Peller37577fe2013-12-05 17:19:39 +02001180};
1181
Arik Nemtsovb2492502014-03-13 12:21:50 +02001182static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
1183 struct ieee80211_vif *vif,
1184 struct iwl_d0i3_iter_data *iter_data)
1185{
1186 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1187 struct ieee80211_sta *ap_sta;
1188 struct iwl_mvm_sta *mvmsta;
1189 u32 available_tids = 0;
1190 u8 tid;
1191
1192 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
1193 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
1194 return false;
1195
1196 ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
1197 if (IS_ERR_OR_NULL(ap_sta))
1198 return false;
1199
1200 mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
1201 spin_lock_bh(&mvmsta->lock);
1202 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1203 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1204
1205 /*
1206 * in case of pending tx packets, don't use this tid
1207 * for offloading in order to prevent reuse of the same
1208 * qos seq counters.
1209 */
1210 if (iwl_mvm_tid_queued(tid_data))
1211 continue;
1212
1213 if (tid_data->state != IWL_AGG_OFF)
1214 continue;
1215
1216 available_tids |= BIT(tid);
1217 }
1218 spin_unlock_bh(&mvmsta->lock);
1219
1220 /*
1221 * disallow protocol offloading if we have no available tid
1222 * (with no pending frames and no active aggregation,
1223 * as we don't handle "holes" properly - the scheduler needs the
1224 * frame's seq number and TFD index to match)
1225 */
1226 if (!available_tids)
1227 return true;
1228
1229 /* for simplicity, just use the first available tid */
1230 iter_data->offloading_tid = ffs(available_tids) - 1;
1231 return false;
1232}
1233
Eliad Pellerd6230972013-11-03 20:09:08 +02001234static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
1235 struct ieee80211_vif *vif)
1236{
Eliad Peller37577fe2013-12-05 17:19:39 +02001237 struct iwl_d0i3_iter_data *data = _data;
1238 struct iwl_mvm *mvm = data->mvm;
1239 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Eliad Pellerd6230972013-11-03 20:09:08 +02001240 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
1241
1242 IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
1243 if (vif->type != NL80211_IFTYPE_STATION ||
1244 !vif->bss_conf.assoc)
1245 return;
1246
Arik Nemtsovb2492502014-03-13 12:21:50 +02001247 /*
1248 * in case of pending tx packets or active aggregations,
1249 * avoid offloading features in order to prevent reuse of
1250 * the same qos seq counters.
1251 */
1252 if (iwl_mvm_disallow_offloading(mvm, vif, data))
1253 data->disable_offloading = true;
1254
Eliad Pellerd6230972013-11-03 20:09:08 +02001255 iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
Sara Sharonc97dab42015-11-19 11:53:49 +02001256 iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
1257 false, flags);
Eliad Pellerd6230972013-11-03 20:09:08 +02001258
1259 /*
1260 * on init/association, mvm already configures POWER_TABLE_CMD
1261 * and REPLY_MCAST_FILTER_CMD, so currently don't
1262 * reconfigure them (we might want to use different
1263 * params later on, though).
1264 */
Eliad Peller37577fe2013-12-05 17:19:39 +02001265 data->ap_sta_id = mvmvif->ap_sta_id;
1266 data->vif_count++;
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001267
1268 /*
1269 * no new commands can be sent at this stage, so it's safe
1270 * to save the vif pointer during d0i3 entrance.
1271 */
1272 data->connected_vif = vif;
Eliad Pellerd6230972013-11-03 20:09:08 +02001273}
1274
Eliad Peller1a95c8d2013-11-21 19:19:52 +02001275static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
Emmanuel Grumbachc8b06a92014-11-24 09:06:57 +02001276 struct iwl_wowlan_config_cmd *cmd,
Eliad Peller1a95c8d2013-11-21 19:19:52 +02001277 struct iwl_d0i3_iter_data *iter_data)
1278{
1279 struct ieee80211_sta *ap_sta;
1280 struct iwl_mvm_sta *mvm_ap_sta;
1281
1282 if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
1283 return;
1284
1285 rcu_read_lock();
1286
1287 ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
1288 if (IS_ERR_OR_NULL(ap_sta))
1289 goto out;
1290
1291 mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
Emmanuel Grumbachc8b06a92014-11-24 09:06:57 +02001292 cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
Arik Nemtsovb2492502014-03-13 12:21:50 +02001293 cmd->offloading_tid = iter_data->offloading_tid;
Sara Sharon70b4c532015-11-19 13:12:15 +02001294 cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
Sara Sharon0db056d2015-12-29 11:07:15 +02001295 ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
Eliad Peller1a95c8d2013-11-21 19:19:52 +02001296 /*
1297 * The d0i3 uCode takes care of the nonqos counters,
1298 * so configure only the qos seq ones.
1299 */
Emmanuel Grumbachc8b06a92014-11-24 09:06:57 +02001300 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
Eliad Peller1a95c8d2013-11-21 19:19:52 +02001301out:
1302 rcu_read_unlock();
1303}
Eliad Peller67359432014-12-09 15:23:54 +02001304
1305int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
Eliad Pellerb3370d42013-11-25 15:20:16 +02001306{
1307 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
Arik Nemtsov98ee7782013-10-02 16:58:09 +03001308 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
Eliad Pellerb77f06d2013-11-06 10:49:32 +02001309 int ret;
Eliad Peller37577fe2013-12-05 17:19:39 +02001310 struct iwl_d0i3_iter_data d0i3_iter_data = {
1311 .mvm = mvm,
1312 };
Emmanuel Grumbachc8b06a92014-11-24 09:06:57 +02001313 struct iwl_wowlan_config_cmd wowlan_config_cmd = {
1314 .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
1315 IWL_WOWLAN_WAKEUP_BEACON_MISS |
Sara Sharon0db056d2015-12-29 11:07:15 +02001316 IWL_WOWLAN_WAKEUP_LINK_CHANGE),
Eliad Pellerb77f06d2013-11-06 10:49:32 +02001317 };
Arik Nemtsov98ee7782013-10-02 16:58:09 +03001318 struct iwl_d3_manager_config d3_cfg_cmd = {
1319 .min_sleep_time = cpu_to_le32(1000),
Eliad Pellerd9f1fc22014-12-23 15:05:14 +02001320 .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
Arik Nemtsov98ee7782013-10-02 16:58:09 +03001321 };
Eliad Pellerb3370d42013-11-25 15:20:16 +02001322
1323 IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
Arik Nemtsov98ee7782013-10-02 16:58:09 +03001324
Eliad Peller08f0d232015-12-10 15:47:11 +02001325 if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
1326 return -EINVAL;
1327
Arik Nemtsovb2492502014-03-13 12:21:50 +02001328 set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
Arik Nemtsovb2492502014-03-13 12:21:50 +02001329
Eliad Pellerf4cf8682014-11-04 16:57:06 +02001330 /*
1331 * iwl_mvm_ref_sync takes a reference before checking the flag.
1332 * so by checking there is no held reference we prevent a state
1333 * in which iwl_mvm_ref_sync continues successfully while we
1334 * configure the firmware to enter d0i3
1335 */
1336 if (iwl_mvm_ref_taken(mvm)) {
1337 IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
1338 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
Eliad Pellercaf15782014-11-09 15:25:33 +02001339 wake_up(&mvm->d0i3_exit_waitq);
Eliad Pellerf4cf8682014-11-04 16:57:06 +02001340 return 1;
1341 }
1342
Eliad Pellerd6230972013-11-03 20:09:08 +02001343 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1344 IEEE80211_IFACE_ITER_NORMAL,
1345 iwl_mvm_enter_d0i3_iterator,
Eliad Peller37577fe2013-12-05 17:19:39 +02001346 &d0i3_iter_data);
1347 if (d0i3_iter_data.vif_count == 1) {
1348 mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
Arik Nemtsovb2492502014-03-13 12:21:50 +02001349 mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
Eliad Peller37577fe2013-12-05 17:19:39 +02001350 } else {
1351 WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
1352 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
Arik Nemtsovb2492502014-03-13 12:21:50 +02001353 mvm->d0i3_offloading = false;
Eliad Peller37577fe2013-12-05 17:19:39 +02001354 }
Eliad Pellerd6230972013-11-03 20:09:08 +02001355
Emmanuel Grumbachecc7c512015-08-17 15:54:41 +03001356 /* make sure we have no running tx while configuring the seqno */
1357 synchronize_net();
1358
Luca Coelhoeb3908d2015-10-02 18:13:10 +03001359 /* Flush the hw queues, in case something got queued during entry */
1360 ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags);
1361 if (ret)
1362 return ret;
1363
Eliad Peller183edd82015-09-01 14:16:00 +03001364 /* configure wowlan configuration only if needed */
1365 if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
Sara Sharon0db056d2015-12-29 11:07:15 +02001366 /* wake on beacons only if beacon storing isn't supported */
1367 if (!fw_has_capa(&mvm->fw->ucode_capa,
1368 IWL_UCODE_TLV_CAPA_BEACON_STORING))
1369 wowlan_config_cmd.wakeup_filter |=
1370 cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
1371
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001372 iwl_mvm_wowlan_config_key_params(mvm,
1373 d0i3_iter_data.connected_vif,
1374 true, flags);
1375
Eliad Peller183edd82015-09-01 14:16:00 +03001376 iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
1377 &d0i3_iter_data);
1378
1379 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
1380 sizeof(wowlan_config_cmd),
1381 &wowlan_config_cmd);
1382 if (ret)
1383 return ret;
1384 }
Eliad Pellerb77f06d2013-11-06 10:49:32 +02001385
Arik Nemtsov98ee7782013-10-02 16:58:09 +03001386 return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
1387 flags | CMD_MAKE_TRANS_IDLE,
1388 sizeof(d3_cfg_cmd), &d3_cfg_cmd);
Eliad Pellerb3370d42013-11-25 15:20:16 +02001389}
1390
Eliad Pellerd6230972013-11-03 20:09:08 +02001391static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
1392 struct ieee80211_vif *vif)
1393{
1394 struct iwl_mvm *mvm = _data;
1395 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
1396
1397 IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
1398 if (vif->type != NL80211_IFTYPE_STATION ||
1399 !vif->bss_conf.assoc)
1400 return;
1401
1402 iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
1403}
1404
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001405struct iwl_mvm_d0i3_exit_work_iter_data {
David Spinadelb3df2242015-08-06 10:26:50 +03001406 struct iwl_mvm *mvm;
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001407 struct iwl_wowlan_status *status;
David Spinadelb3df2242015-08-06 10:26:50 +03001408 u32 wakeup_reasons;
1409};
1410
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001411static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
1412 struct ieee80211_vif *vif)
Eliad Peller37577fe2013-12-05 17:19:39 +02001413{
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001414 struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
Eliad Peller37577fe2013-12-05 17:19:39 +02001415 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001416 u32 reasons = data->wakeup_reasons;
Eliad Peller37577fe2013-12-05 17:19:39 +02001417
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001418 /* consider only the relevant station interface */
1419 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1420 data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
1421 return;
1422
1423 if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
1424 iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
1425 else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
1426 ieee80211_beacon_loss(vif);
1427 else
1428 iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
Eliad Peller37577fe2013-12-05 17:19:39 +02001429}
1430
Arik Nemtsovb2492502014-03-13 12:21:50 +02001431void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
1432{
1433 struct ieee80211_sta *sta = NULL;
1434 struct iwl_mvm_sta *mvm_ap_sta;
1435 int i;
1436 bool wake_queues = false;
1437
1438 lockdep_assert_held(&mvm->mutex);
1439
1440 spin_lock_bh(&mvm->d0i3_tx_lock);
1441
1442 if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
1443 goto out;
1444
1445 IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
1446
1447 /* get the sta in order to update seq numbers and re-enqueue skbs */
1448 sta = rcu_dereference_protected(
1449 mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
1450 lockdep_is_held(&mvm->mutex));
1451
1452 if (IS_ERR_OR_NULL(sta)) {
1453 sta = NULL;
1454 goto out;
1455 }
1456
1457 if (mvm->d0i3_offloading && qos_seq) {
1458 /* update qos seq numbers if offloading was enabled */
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001459 mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
Arik Nemtsovb2492502014-03-13 12:21:50 +02001460 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1461 u16 seq = le16_to_cpu(qos_seq[i]);
1462 /* firmware stores last-used one, we store next one */
1463 seq += 0x10;
1464 mvm_ap_sta->tid_data[i].seq_number = seq;
1465 }
1466 }
1467out:
1468 /* re-enqueue (or drop) all packets */
1469 while (!skb_queue_empty(&mvm->d0i3_tx)) {
1470 struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
1471
1472 if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
1473 ieee80211_free_txskb(mvm->hw, skb);
1474
1475 /* if the skb_queue is not empty, we need to wake queues */
1476 wake_queues = true;
1477 }
1478 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1479 wake_up(&mvm->d0i3_exit_waitq);
1480 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1481 if (wake_queues)
1482 ieee80211_wake_queues(mvm->hw);
1483
1484 spin_unlock_bh(&mvm->d0i3_tx_lock);
1485}
1486
Eliad Peller37577fe2013-12-05 17:19:39 +02001487static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1488{
1489 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
1490 struct iwl_host_cmd get_status_cmd = {
1491 .id = WOWLAN_GET_STATUSES,
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001492 .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
Eliad Peller37577fe2013-12-05 17:19:39 +02001493 };
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001494 struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
1495 .mvm = mvm,
1496 };
1497
Emmanuel Grumbach3afec6392014-03-30 09:10:28 +03001498 struct iwl_wowlan_status *status;
Eliad Peller37577fe2013-12-05 17:19:39 +02001499 int ret;
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001500 u32 wakeup_reasons = 0;
Arik Nemtsovb2492502014-03-13 12:21:50 +02001501 __le16 *qos_seq = NULL;
Eliad Peller37577fe2013-12-05 17:19:39 +02001502
1503 mutex_lock(&mvm->mutex);
1504 ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
1505 if (ret)
1506 goto out;
1507
1508 if (!get_status_cmd.resp_pkt)
1509 goto out;
1510
1511 status = (void *)get_status_cmd.resp_pkt->data;
1512 wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
Arik Nemtsovb2492502014-03-13 12:21:50 +02001513 qos_seq = status->qos_seq_ctr;
Eliad Peller37577fe2013-12-05 17:19:39 +02001514
1515 IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
1516
Eliad Pellera3f7ba52015-11-11 17:23:59 +02001517 iter_data.wakeup_reasons = wakeup_reasons;
1518 iter_data.status = status;
1519 ieee80211_iterate_active_interfaces(mvm->hw,
1520 IEEE80211_IFACE_ITER_NORMAL,
1521 iwl_mvm_d0i3_exit_work_iter,
1522 &iter_data);
Eliad Peller37577fe2013-12-05 17:19:39 +02001523out:
Arik Nemtsovb2492502014-03-13 12:21:50 +02001524 iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
Jonathan Doron47c8b152014-11-27 16:55:25 +02001525
Eliad Peller7c014e32015-09-06 14:17:17 +03001526 IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
1527 wakeup_reasons);
1528
Eliad Pellere5629be2015-04-14 11:36:23 +03001529 /* qos_seq might point inside resp_pkt, so free it only now */
1530 if (get_status_cmd.resp_pkt)
1531 iwl_free_resp(&get_status_cmd);
1532
Jonathan Doron47c8b152014-11-27 16:55:25 +02001533 /* the FW might have updated the regdomain */
1534 iwl_mvm_update_changed_regdom(mvm);
1535
Eliad Pellerd15a7472014-03-27 18:53:12 +02001536 iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
Eliad Peller37577fe2013-12-05 17:19:39 +02001537 mutex_unlock(&mvm->mutex);
1538}
1539
Eliad Pellerd15a7472014-03-27 18:53:12 +02001540int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
Eliad Pellerb3370d42013-11-25 15:20:16 +02001541{
Arik Nemtsov98ee7782013-10-02 16:58:09 +03001542 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
1543 CMD_WAKE_UP_TRANS;
Eliad Pellerd6230972013-11-03 20:09:08 +02001544 int ret;
Eliad Pellerb3370d42013-11-25 15:20:16 +02001545
1546 IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
Arik Nemtsov98ee7782013-10-02 16:58:09 +03001547
Eliad Peller08f0d232015-12-10 15:47:11 +02001548 if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
1549 return -EINVAL;
1550
Eliad Pellerd15a7472014-03-27 18:53:12 +02001551 mutex_lock(&mvm->d0i3_suspend_mutex);
1552 if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
1553 IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
1554 __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
1555 mutex_unlock(&mvm->d0i3_suspend_mutex);
1556 return 0;
1557 }
1558 mutex_unlock(&mvm->d0i3_suspend_mutex);
1559
Eliad Pellerd6230972013-11-03 20:09:08 +02001560 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
1561 if (ret)
Eliad Peller37577fe2013-12-05 17:19:39 +02001562 goto out;
Eliad Pellerd6230972013-11-03 20:09:08 +02001563
1564 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1565 IEEE80211_IFACE_ITER_NORMAL,
1566 iwl_mvm_exit_d0i3_iterator,
1567 mvm);
Eliad Peller37577fe2013-12-05 17:19:39 +02001568out:
1569 schedule_work(&mvm->d0i3_exit_work);
1570 return ret;
Eliad Pellerb3370d42013-11-25 15:20:16 +02001571}
1572
Eliad Peller67359432014-12-09 15:23:54 +02001573int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
Eliad Pellerd15a7472014-03-27 18:53:12 +02001574{
1575 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1576
1577 iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
1578 return _iwl_mvm_exit_d0i3(mvm);
1579}
1580
Johannes Berg0316d302015-05-22 13:41:07 +02001581#define IWL_MVM_COMMON_OPS \
1582 /* these could be differentiated */ \
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +02001583 .async_cb = iwl_mvm_async_cb, \
Johannes Berg0316d302015-05-22 13:41:07 +02001584 .queue_full = iwl_mvm_stop_sw_queue, \
1585 .queue_not_full = iwl_mvm_wake_sw_queue, \
1586 .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
1587 .free_skb = iwl_mvm_free_skb, \
1588 .nic_error = iwl_mvm_nic_error, \
1589 .cmd_queue_full = iwl_mvm_cmd_queue_full, \
1590 .nic_config = iwl_mvm_nic_config, \
1591 .enter_d0i3 = iwl_mvm_enter_d0i3, \
1592 .exit_d0i3 = iwl_mvm_exit_d0i3, \
1593 /* as we only register one, these MUST be common! */ \
1594 .start = iwl_op_mode_mvm_start, \
1595 .stop = iwl_op_mode_mvm_stop
1596
Johannes Berg8ca151b2013-01-24 14:25:36 +01001597static const struct iwl_op_mode_ops iwl_mvm_ops = {
Johannes Berg0316d302015-05-22 13:41:07 +02001598 IWL_MVM_COMMON_OPS,
1599 .rx = iwl_mvm_rx,
1600};
1601
1602static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
1603 struct napi_struct *napi,
1604 struct iwl_rx_cmd_buffer *rxb,
1605 unsigned int queue)
1606{
1607 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
Sara Sharon585a6fc2015-12-01 13:48:18 +02001608 struct iwl_rx_packet *pkt = rxb_addr(rxb);
Johannes Berg0316d302015-05-22 13:41:07 +02001609
Sara Sharon585a6fc2015-12-01 13:48:18 +02001610 if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
1611 iwl_mvm_rx_frame_release(mvm, rxb, queue);
Sara Sharon94bb4482015-12-16 18:48:28 +02001612 else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
1613 pkt->hdr.group_id == DATA_PATH_GROUP))
1614 iwl_mvm_rx_queue_notif(mvm, rxb, queue);
Sara Sharon585a6fc2015-12-01 13:48:18 +02001615 else
1616 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
Johannes Berg0316d302015-05-22 13:41:07 +02001617}
1618
1619static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
1620 IWL_MVM_COMMON_OPS,
1621 .rx = iwl_mvm_rx_mq,
1622 .rx_rss = iwl_mvm_rx_mq_rss,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001623};