/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	.tfd_q_hang_detect = true
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
		   bool, 0444);
MODULE_PARM_DESC(tfd_q_hang_detect,
		 "TFD queues hang detection (default: true)");

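/*
 * Note: these parameters can be set at module load time, e.g.
 * "modprobe iwlmvm power_scheme=3" for low-power mode; the 0444
 * permissions also expose them read-only under
 * /sys/module/iwlmvm/parameters/.
 */
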
/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	if (ret)
		pr_err("Unable to register MVM op_mode: %d\n", ret);

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);

static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
		   CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
		   CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of the other HW, but unsetting
	 * them (such as the 7260) causes automatic tests to fail on seemingly
	 * unrelated errors. Need to further investigate this, but for now
	 * we'll separate cases.
	 */
	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
		reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;

	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
				CSR_HW_IF_CONFIG_REG_D3_DEBUG,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

/**
 * enum iwl_rx_handler_context - context for Rx handler
 * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
 *	which can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED: If the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 */
enum iwl_rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
};

/**
 * struct iwl_rx_handlers - handler for FW notification
 * @cmd_id: command id
 * @context: see &enum iwl_rx_handler_context
 * @fn: the function called when the notification is received
 */
struct iwl_rx_handlers {
	u16 cmd_id;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

#define RX_HANDLER(_cmd_id, _fn, _context)	\
	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
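
/*
 * WIDE_ID() packs a (group, opcode) pair into the 16-bit cmd_id used
 * for the lookup below: group << 8 | opcode. Legacy commands use
 * group 0 and therefore keep their original 8-bit opcode as cmd_id.
 */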

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, context)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can run in one of three contexts, see &enum iwl_rx_handler_context
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),

	RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
		       iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
		   RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC),

	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
		   RX_HANDLER_SYNC),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
		   RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),

	RX_HANDLER(SCAN_ITERATION_COMPLETE,
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
		   iwl_mvm_rx_lmac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
		   RX_HANDLER_SYNC),
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),

	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
		   RX_HANDLER_SYNC),

	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
		   RX_HANDLER_SYNC),

	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),

	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
		   RX_HANDLER_SYNC),
	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP

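/*
 * Dispatch example for the table above: a BT_PROFILE_NOTIFICATION is
 * matched by iwl_mvm_rx_common() below; since it is marked
 * RX_HANDLER_ASYNC_LOCKED, the rxb page is stolen, the entry is queued
 * on async_handlers_list, and iwl_mvm_rx_bt_coex_notif() later runs
 * from iwl_mvm_async_handlers_wk() with mvm->mutex held.
 */
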
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(MVM_ALIVE),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(ECHO_CMD),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(DBG_CFG),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(TOF_CMD),
	HCMD_NAME(TOF_NOTIFICATION),
	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(ADD_STA),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(FW_GET_ITEM_CMD),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(WEP_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
	HCMD_NAME(LEDS_CMD),
	HCMD_NAME(LQ_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
	HCMD_NAME(PHY_DB_CMD),
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(DC2DC_CONFIG_CMD),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(CARD_STATE_NOTIFICATION),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(RSS_CONFIG_CMD),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(FRAME_RELEASE),
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(BCAST_FILTER_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(OFFLOADS_QUERY_CMD),
	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
	HCMD_NAME(SHARED_MEM_CFG_CMD),
	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
	HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(CTDP_CONFIG_CMD),
	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
	HCMD_NAME(GEO_TX_POWER_LIMIT),
	HCMD_NAME(CT_KILL_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
	HCMD_NAME(DQA_ENABLE_CMD),
	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
	HCMD_NAME(STA_HE_CTXT_CMD),
	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
	HCMD_NAME(STA_PM_NOTIF),
	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
	HCMD_NAME(RX_QUEUES_NOTIFICATION),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
	HCMD_NAME(MFU_ASSERT_DUMP_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
	HCMD_NAME(STORED_BEACON_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
	HCMD_NAME(NVM_ACCESS_COMPLETE),
	HCMD_NAME(NVM_GET_INFO),
};

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
	[REGULATORY_AND_NVM_GROUP] =
		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};

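/*
 * The HCMD_NAME() tables above are handed to the transport through
 * trans_cfg.command_groups below and are used only to resolve a
 * command id to a human-readable name in debug prints (binary search
 * by iwl_get_cmd_string()).
 */
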
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
#ifdef CONFIG_PM
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
#endif

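/*
 * Pick the Tx power backoff for this platform: walk the per-device
 * backoff table (expected to be sorted by descending power threshold)
 * and return the backoff matching the ACPI-reported power limit;
 * 0 means no backoff is needed.
 */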
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
	u64 dflt_pwr_limit;

	if (!backoff)
		return 0;

	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);

	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;

		backoff++;
	}

	return 0;
}

static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
	struct ieee80211_vif *tx_blocked_vif;
	struct iwl_mvm_vif *mvmvif;

	mutex_lock(&mvm->mutex);

	tx_blocked_vif =
		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					  lockdep_is_held(&mvm->mutex));

	if (!tx_blocked_vif)
		goto unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
unlock:
	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_fwrt_dump_start(void *ctx)
{
	struct iwl_mvm *mvm = ctx;
	int ret;

	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
	if (ret)
		return ret;

	mutex_lock(&mvm->mutex);

	return 0;
}

static void iwl_mvm_fwrt_dump_end(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}

static bool iwl_mvm_fwrt_fw_running(void *ctx)
{
	return iwl_mvm_firmware_running(ctx);
}

static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
	int ret;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, host_cmd);
	mutex_unlock(&mvm->mutex);

	return ret;
}

static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
	.dump_start = iwl_mvm_fwrt_dump_start,
	.dump_end = iwl_mvm_fwrt_dump_end,
	.fw_running = iwl_mvm_fwrt_fw_running,
	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
};

static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int err, scan_size;
	u32 min_backoff;
	enum iwl_amsdu_size rb_size_default;

	/*
	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	if (cfg->max_rx_agg_size)
		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
	else
		hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
	else
		hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;

	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
			    dbgfs_dir);

	mvm->init_status = 0;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
		trans->rx_mpdu_cmd_hdr_size =
			(trans->cfg->device_family >=
			 IWL_DEVICE_FAMILY_22560) ?
			sizeof(struct iwl_rx_mpdu_desc) :
			IWL_RX_DESC_SIZE_V1;
	} else {
		op_mode->ops = &iwl_mvm_ops;
		trans->rx_mpdu_cmd_hdr_size =
			sizeof(struct iwl_rx_mpdu_res_start);

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}

	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;

	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
	mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;

	mvm->sf_state = SF_UNINIT;
	if (iwl_mvm_has_unified_ucode(mvm))
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
	else
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
	mvm->drop_bcn_ap_mode = true;

	mutex_init(&mvm->mutex);
	mutex_init(&mvm->d0i3_suspend_mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
	spin_lock_init(&mvm->queue_info_lock);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
#ifdef CONFIG_PM
	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
#endif
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);

	spin_lock_init(&mvm->d0i3_tx_lock);
	spin_lock_init(&mvm->refs_lock);
	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);
	init_waitqueue_head(&mvm->rx_sync_waitq);

	atomic_set(&mvm->queue_sync_counter, 0);

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	spin_lock_init(&mvm->tcm.lock);
	INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	mvm->tcm.uapsd_nonagg_ts = jiffies;

	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);

	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		rb_size_default = IWL_AMSDU_2K;
	else
		rb_size_default = IWL_AMSDU_4K;

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		trans_cfg.rx_buf_size = rb_size_default;
		break;
	case IWL_AMSDU_4K:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_8K:
		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
		break;
	case IWL_AMSDU_12K:
		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
		break;
	default:
		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
		       iwlwifi_mod_params.amsdu_size);
		trans_cfg.rx_buf_size = rb_size_default;
	}

	trans->wide_cmd_header = true;
	trans_cfg.bc_table_dword =
		mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;

	trans_cfg.command_groups = iwl_mvm_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);

	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
	trans_cfg.scd_set_active = true;

	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
					  driver_data[2]);

	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;

	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
	trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
	       sizeof(trans->dbg_conf_tlv));
	trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
	trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;

	trans->iml = mvm->fw->iml;
	trans->iml_len = mvm->fw->iml_len;

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->cfg->name, mvm->trans->hw_rev);

	if (iwlwifi_mod_params.nvm_file)
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");

	err = iwl_trans_start_hw(mvm->trans);
	if (err)
		goto out_free;

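	/*
	 * Run the INIT firmware once at probe time: it reads the NVM and
	 * performs calibrations, and the device is stopped again afterwards
	 * (unless init_dbg asked to keep it up to debug an INIT assert).
	 */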
	mutex_lock(&mvm->mutex);
	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
	err = iwl_run_init_mvm_ucode(mvm, true);
	if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status))
		iwl_fw_alive_error_dump(&mvm->fwrt);
	if (!iwlmvm_mod_params.init_dbg || !err)
		iwl_mvm_stop_device(mvm);
	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
	mutex_unlock(&mvm->mutex);
	if (err < 0) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
		goto out_free;
	}

	scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

	err = iwl_mvm_mac_setup_register(mvm);
	if (err)
		goto out_free;
	mvm->hw_registered = true;

	min_backoff = iwl_mvm_min_backoff(mvm);
	iwl_mvm_thermal_initialize(mvm, min_backoff);

	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
	if (err)
		goto out_unregister;

	if (!iwl_mvm_has_new_rx_stats_api(mvm))
		memset(&mvm->rx_stats_v3, 0,
		       sizeof(struct mvm_statistics_rx_v3));
	else
		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

	/* The transport always starts with a taken reference; we can
	 * release it now if d0i3 is supported.
	 */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_unref(mvm->trans);

	iwl_mvm_tof_init(mvm);

	return op_mode;

 out_unregister:
	if (iwlmvm_mod_params.init_dbg)
		return op_mode;

	ieee80211_unregister_hw(mvm->hw);
	mvm->hw_registered = false;
	iwl_mvm_leds_exit(mvm);
	iwl_mvm_thermal_exit(mvm);
 out_free:
	iwl_fw_flush_dump(&mvm->fwrt);
	iwl_fw_runtime_free(&mvm->fwrt);

	if (iwlmvm_mod_params.init_dbg)
		return op_mode;
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	iwl_trans_op_mode_leave(trans);

	ieee80211_free_hw(mvm->hw);
	return NULL;
}

static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	/* If d0i3 is supported, we have released the reference that
	 * the transport started with, so we should take it back now
	 * that we are leaving.
	 */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_ref(mvm->trans);

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_thermal_exit(mvm);

	if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
		ieee80211_unregister_hw(mvm->hw);
		mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
	}

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
	kfree(mvm->d3_resume_sram);
#endif
	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	kfree(mvm->nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	cancel_delayed_work_sync(&mvm->tcm.work);

	iwl_mvm_tof_clean(mvm);

	iwl_fw_runtime_free(&mvm->fwrt);
	mutex_destroy(&mvm->mutex);
	mutex_destroy(&mvm->d0i3_suspend_mutex);

	ieee80211_free_hw(mvm->hw);
}

struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
	LIST_HEAD(local_list);

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this list,
	 * add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_lock(&mvm->mutex);
		entry->fn(mvm, &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_unlock(&mvm->mutex);
		kfree(entry);
	}
}

static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
					    struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_FW_NOTIF);
	if (!trig)
		return;

	cmds_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)
			break;

		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"CMD 0x%02x.%02x received",
					pkt->hdr.group_id, pkt->hdr.cmd);
		break;
	}
}

static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	int i;

	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		entry->context = rx_h->context;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		break;
	}
}

static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
		       struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
			  struct napi_struct *napi,
			  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

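/*
 * mac80211 queue stop/wake is refcounted per queue: several hw queues
 * can map to one mac80211 queue, so only the first stop really stops
 * the queue and only the last wake really wakes it.
 */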
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
	int q;

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "mac80211 %d already stopped\n", q);
			continue;
		}

		ieee80211_stop_queue(mvm->hw, q);
	}
}

static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
			     const struct iwl_device_cmd *cmd)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	/*
	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
	 * commands that need to block the Tx queues.
	 */
	iwl_trans_block_txq_ptrs(mvm->trans, false);
}

static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->hw_queue_to_mac80211[hw_queue];
	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_mvm_stop_mac_queues(mvm, mq);
}

void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
	int q;

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "mac80211 %d still stopped\n", q);
			continue;
		}

		ieee80211_wake_queue(mvm->hw, q);
	}
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->hw_queue_to_mac80211[hw_queue];
	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_mvm_start_mac_queues(mvm, mq);
}

static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
	bool state = iwl_mvm_is_radio_killed(mvm);

	if (state)
		wake_up(&mvm->rx_sync_waitq);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
}

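/*
 * CT-kill ("critical temperature kill") takes the radio down when the
 * NIC overheats; it feeds the same combined rfkill state as the HW
 * rfkill switch handled below.
 */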
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
	if (state)
		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);

	iwl_mvm_set_rfkill_state(mvm);
}

static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	bool calibrating = READ_ONCE(mvm->calibrating);

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

	iwl_mvm_set_rfkill_state(mvm);

	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
	if (calibrating)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
	return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
}

static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_mvm_reprobe_wk(struct work_struct *wk)
{
	struct iwl_mvm_reprobe *reprobe;

	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	kfree(reprobe);
	module_put(THIS_MODULE);
}

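/*
 * Error recovery policy, in order:
 *  - first error with fw restarts disabled: only collect firmware
 *    debug data;
 *  - error while already reconfiguring after a restart: give up and
 *    schedule a full device reprobe;
 *  - otherwise, with regular firmware running and the hw registered:
 *    ask mac80211 to restart the hardware.
 */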
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done, which
	 * is not a problem. It is necessary to abort any OS scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart) the scan status will stay busy.
	 */
	iwl_mvm_report_scan_aborted(mvm);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If the INIT fw asserted, it will likely fail again.
	 * If the WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (!mvm->fw_restart && fw_error) {
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
					NULL, 0);
	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = mvm->trans->dev;
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
		   mvm->hw_registered &&
		   !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
		/* don't let the transport/FW power down */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

		if (fw_error && mvm->fw_restart > 0)
			mvm->fw_restart--;
		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
		ieee80211_restart_hw(mvm->hw);
	}
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
		iwl_mvm_dump_nic_error_log(mvm);

	iwl_mvm_nic_restart(mvm, true);
}

static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm, true);
}

#ifdef CONFIG_PM
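/*
 * D0i3 is a runtime power-saving state: the firmware keeps the
 * association alive on its own (protocol offloads plus wowlan-style
 * filtering) while the host is idle. Entry configures
 * WOWLAN_CONFIGURATION and D3_CONFIG_CMD; exit sends D0I3_END_CMD and
 * resynchronizes state in iwl_mvm_d0i3_exit_work().
 */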
struct iwl_d0i3_iter_data {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *connected_vif;
	u8 ap_sta_id;
	u8 vif_count;
	u8 offloading_tid;
	bool disable_offloading;
};

static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_d0i3_iter_data *iter_data)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;
	u32 available_tids = 0;
	u8 tid;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
		    mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
		return false;

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
	if (!mvmsta)
		return false;

	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/*
		 * in case of pending tx packets, don't use this tid
		 * for offloading in order to prevent reuse of the same
		 * qos seq counters.
		 */
		if (iwl_mvm_tid_queued(mvm, tid_data))
			continue;

		if (tid_data->state != IWL_AGG_OFF)
			continue;

		available_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * disallow protocol offloading if we have no available tid
	 * (with no pending frames and no active aggregation,
	 * as we don't handle "holes" properly - the scheduler needs the
	 * frame's seq number and TFD index to match)
	 */
	if (!available_tids)
		return true;

	/*
	 * For simplicity, just use the first (lowest) available tid:
	 * e.g. if TIDs 1 and 3 are usable, available_tids == 0x0a and
	 * ffs() - 1 selects TID 1.
	 */
	iter_data->offloading_tid = ffs(available_tids) - 1;
	return false;
}

static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_d0i3_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;

	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	/*
	 * in case of pending tx packets or active aggregations,
	 * avoid offloading features in order to prevent reuse of
	 * the same qos seq counters.
	 */
	if (iwl_mvm_disallow_offloading(mvm, vif, data))
		data->disable_offloading = true;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
				   false, flags);

	/*
	 * on init/association, mvm already configures POWER_TABLE_CMD
	 * and REPLY_MCAST_FILTER_CMD, so currently don't
	 * reconfigure them (we might want to use different
	 * params later on, though).
	 */
	data->ap_sta_id = mvmvif->ap_sta_id;
	data->vif_count++;

	/*
	 * no new commands can be sent at this stage, so it's safe
	 * to save the vif pointer during d0i3 entrance.
	 */
	data->connected_vif = vif;
}

static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
				    struct iwl_wowlan_config_cmd *cmd,
				    struct iwl_d0i3_iter_data *iter_data)
{
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
		return;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
	cmd->offloading_tid = iter_data->offloading_tid;
	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
		     ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
	/*
	 * The d0i3 uCode takes care of the nonqos counters,
	 * so configure only the qos seq ones.
	 */
	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
	rcu_read_unlock();
}

int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
	int ret;
	struct iwl_d0i3_iter_data d0i3_iter_data = {
		.mvm = mvm,
	};
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
					     IWL_WOWLAN_WAKEUP_LINK_CHANGE),
	};
	struct iwl_d3_manager_config d3_cfg_cmd = {
		.min_sleep_time = cpu_to_le32(1000),
		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
	};

	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");

	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
		return -EINVAL;

	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	/*
	 * iwl_mvm_ref_sync takes a reference before checking the flag,
	 * so by checking that there is no held reference we prevent a
	 * state in which iwl_mvm_ref_sync continues successfully while
	 * we configure the firmware to enter d0i3.
	 */
	if (iwl_mvm_ref_taken(mvm)) {
		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
		wake_up(&mvm->d0i3_exit_waitq);
		return 1;
	}

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_enter_d0i3_iterator,
						   &d0i3_iter_data);
	if (d0i3_iter_data.vif_count == 1) {
		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
	} else {
		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
		mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
		mvm->d0i3_offloading = false;
	}

	iwl_mvm_pause_tcm(mvm, true);
	/* make sure we have no running tx while configuring the seqno */
	synchronize_net();

	/* Flush the hw queues, in case something got queued during entry */
	/* TODO new tx api */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
	} else {
		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
					    flags);
		if (ret)
			return ret;
	}

	/* send the wowlan configuration only if needed */
	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
		/* wake on beacons only if beacon storing isn't supported */
		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_BEACON_STORING))
			wowlan_config_cmd.wakeup_filter |=
				cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);

		iwl_mvm_wowlan_config_key_params(mvm,
						 d0i3_iter_data.connected_vif,
						 true, flags);

		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
					&d0i3_iter_data);

		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
					   sizeof(wowlan_config_cmd),
					   &wowlan_config_cmd);
		if (ret)
			return ret;
	}

	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
				    flags | CMD_MAKE_TRANS_IDLE,
				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
}

static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;

	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}

struct iwl_mvm_d0i3_exit_work_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status *status;
	u32 wakeup_reasons;
};

static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 reasons = data->wakeup_reasons;

	/* consider only the relevant station interface */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
		return;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
		ieee80211_beacon_loss(vif);
	else
		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
}

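/*
 * iwl_mvm_d0i3_enable_tx - resume Tx after D0i3 exit
 *
 * Update the qos seq numbers from the values the firmware used while
 * offloaded, then re-enqueue (or drop) every frame that was parked on
 * the d0i3_tx queue in the meantime.
 */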
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/*
			 * The firmware stores the last-used value; we
			 * store the next one. The low 4 bits of the
			 * 802.11 sequence-control field hold the
			 * fragment number, so adding 0x10 advances the
			 * sequence number by one.
			 */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}

static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
		.mvm = mvm,
	};

	struct iwl_wowlan_status *status;
	u32 wakeup_reasons = 0;
	__le16 *qos_seq = NULL;

	mutex_lock(&mvm->mutex);

	status = iwl_mvm_send_wowlan_get_status(mvm);
	if (IS_ERR_OR_NULL(status)) {
		/* set to NULL so we don't need to check before kfree'ing */
		status = NULL;
		goto out;
	}

	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
	qos_seq = status->qos_seq_ctr;

	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);

	iter_data.wakeup_reasons = wakeup_reasons;
	iter_data.status = status;
	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_d0i3_exit_work_iter,
					    &iter_data);
out:
	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);

	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);

	/* qos_seq might point inside resp_pkt, so free it only now */
	kfree(status);

	/* the FW might have updated the regdomain */
	iwl_mvm_update_changed_regdom(mvm);

	iwl_mvm_resume_tcm(mvm);
	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
	mutex_unlock(&mvm->mutex);
}

int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
		    CMD_WAKE_UP_TRANS;
	int ret;

	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");

	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
		return -EINVAL;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);
		return 0;
	}
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
	if (ret)
		goto out;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_exit_d0i3_iterator,
						   mvm);
out:
	schedule_work(&mvm->d0i3_exit_work);
	return ret;
}

int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
	return _iwl_mvm_exit_d0i3(mvm);
}

#define IWL_MVM_D0I3_OPS					\
	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
	.exit_d0i3 = iwl_mvm_exit_d0i3,
#else /* CONFIG_PM */
#define IWL_MVM_D0I3_OPS
#endif /* CONFIG_PM */

#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
	.async_cb = iwl_mvm_async_cb,				\
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
	IWL_MVM_D0I3_OPS					\
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop

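/*
 * Two op-mode vtables share IWL_MVM_COMMON_OPS and differ only in the
 * RX hooks: iwl_mvm_ops for single-queue devices, iwl_mvm_ops_mq (with
 * ->rx_rss) for multi-queue RX capable ones. The op mode start code
 * picks one of the two; roughly (a sketch, the exact condition lives
 * in iwl_op_mode_mvm_start() earlier in this file):
 *
 *	op_mode->ops = iwl_mvm_has_new_rx_api(mvm) ?
 *		&iwl_mvm_ops_mq : &iwl_mvm_ops;
 */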
static const struct iwl_op_mode_ops iwl_mvm_ops = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};

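/*
 * RSS RX path, invoked once per RX queue. Only MPDUs, frame-release
 * and RX-queue-sync notifications arrive here; everything else is
 * delivered on the default queue via iwl_mvm_rx_mq() above.
 */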
static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}

static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
};