/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	.tfd_q_hang_detect = true
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
		   bool, 0444);
MODULE_PARM_DESC(tfd_q_hang_detect,
		 "TFD queues hang detection (default: true)");

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	if (ret)
		pr_err("Unable to register MVM op_mode: %d\n", ret);

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);

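/*
 * Program the SKU (MAC step/dash taken from the HW revision) and the radio
 * configuration (type/step/dash taken from the firmware PHY configuration)
 * into CSR_HW_IF_CONFIG_REG.
 */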
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
			CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
			CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of the other HW, but unsetting
	 * them (such as the 7260) causes automatic tests to fail on seemingly
	 * unrelated errors. Need to further investigate this, but for now
	 * we'll separate cases.
	 */
	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
		reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;

	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
				CSR_HW_IF_CONFIG_REG_D3_DEBUG,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

/**
 * enum iwl_rx_handler_context - context for an Rx handler
 * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
 *	which can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED: If the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 */
enum iwl_rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
};

/**
 * struct iwl_rx_handlers - handler for an FW notification
 * @cmd_id: command id
 * @context: see &enum iwl_rx_handler_context
 * @fn: the function called when the notification is received
 */
struct iwl_rx_handlers {
	u16 cmd_id;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

#define RX_HANDLER(_cmd_id, _fn, _context)	\
	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, context)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can be one of three contexts, see &iwl_rx_handler_context
 */
248static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200249 RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
250 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
Johannes Berg8ca151b2013-01-24 14:25:36 +0100251
Gregory Greenman84226ca2017-11-02 04:07:52 +0200252 RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
253 iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),
254
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200255 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
256 RX_HANDLER_ASYNC_LOCKED),
257 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
258 RX_HANDLER_ASYNC_LOCKED),
259 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
260 RX_HANDLER_ASYNC_LOCKED),
Emmanuel Grumbachf421f9c2013-01-17 14:20:29 +0200261
Sara Sharon3af512d62015-07-22 11:38:40 +0300262 RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200263 iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
Sara Sharon3af512d62015-07-22 11:38:40 +0300264
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200265 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
266 RX_HANDLER_SYNC),
267 RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
268 RX_HANDLER_ASYNC_LOCKED),
Emmanuel Grumbach497b49d2013-06-02 20:54:48 +0300269
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200270 RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
Johannes Berg3e56ead2013-02-15 22:23:18 +0100271
Alexander Bondare5d74642014-12-09 19:15:49 +0200272 RX_HANDLER(SCAN_ITERATION_COMPLETE,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200273 iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
David Spinadel35a000b2013-08-28 09:29:43 +0300274 RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200275 iwl_mvm_rx_lmac_scan_complete_notif,
276 RX_HANDLER_ASYNC_LOCKED),
Luciano Coelho6e56f012015-05-06 16:03:39 +0300277 RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200278 RX_HANDLER_SYNC),
David Spinadeld2496222014-05-20 12:46:37 +0300279 RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200280 RX_HANDLER_ASYNC_LOCKED),
Avraham Sternee9219b2015-03-23 15:09:27 +0200281 RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200282 iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
Emmanuel Grumbach497b49d2013-06-02 20:54:48 +0300283
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200284 RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
285 RX_HANDLER_SYNC),
Johannes Berg8ca151b2013-01-24 14:25:36 +0100286
Hila Gonend64048e2013-03-13 18:00:03 +0200287 RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200288 RX_HANDLER_SYNC),
Hila Gonend64048e2013-03-13 18:00:03 +0200289
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200290 RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
Alexander Bondar175a70b2013-04-14 20:59:37 +0300291 RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200292 iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
293 RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
294 RX_HANDLER_ASYNC_LOCKED),
Aviya Erenfeld09eef332015-09-01 19:34:38 +0300295 RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
Chaya Rachel Ivgiec77a332016-03-13 11:39:53 +0200296 iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
Chaya Rachel Ivgi0a3b7112015-12-16 16:34:55 +0200297 RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200298 iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
Luciano Coelhoea9af242014-11-06 10:34:49 +0200299
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +0300300 RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200301 RX_HANDLER_ASYNC_LOCKED),
302 RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
303 RX_HANDLER_SYNC),
304 RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
305 RX_HANDLER_ASYNC_LOCKED),
Golan Ben-Amibdccdb82016-11-15 14:45:29 +0200306 RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
307 iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
Sara Sharon0db056d2015-12-29 11:07:15 +0200308 RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200309 iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
Sara Sharonf92659a2016-02-03 15:04:49 +0200310 RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
Chaya Rachel Ivgic9cb14a2016-03-03 15:35:34 +0200311 iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
Johannes Berg65e25482016-04-13 14:24:22 +0200312 RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
313 iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
Johannes Berg8ca151b2013-01-24 14:25:36 +0100314};
315#undef RX_HANDLER
Avraham Stern1230b162015-07-09 17:17:03 +0300316#undef RX_HANDLER_GRP
Johannes Berg8ca151b2013-01-24 14:25:36 +0100317
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(MVM_ALIVE),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(ECHO_CMD),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(DBG_CFG),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(TOF_CMD),
	HCMD_NAME(TOF_NOTIFICATION),
	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(ADD_STA),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(FW_GET_ITEM_CMD),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(WEP_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
	HCMD_NAME(LEDS_CMD),
	HCMD_NAME(LQ_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
	HCMD_NAME(PHY_DB_CMD),
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(DC2DC_CONFIG_CMD),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(CARD_STATE_NOTIFICATION),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(RSS_CONFIG_CMD),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(FRAME_RELEASE),
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(BCAST_FILTER_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(OFFLOADS_QUERY_CMD),
	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
	HCMD_NAME(SHARED_MEM_CFG_CMD),
	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
	HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(CTDP_CONFIG_CMD),
	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
	HCMD_NAME(GEO_TX_POWER_LIMIT),
	HCMD_NAME(CT_KILL_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
	HCMD_NAME(DQA_ENABLE_CMD),
	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
	HCMD_NAME(STA_HE_CTXT_CMD),
	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
	HCMD_NAME(STA_PM_NOTIF),
	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
	HCMD_NAME(RX_QUEUES_NOTIFICATION),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
	HCMD_NAME(MFU_ASSERT_DUMP_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
	HCMD_NAME(STORED_BEACON_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
	HCMD_NAME(NVM_ACCESS_COMPLETE),
	HCMD_NAME(NVM_GET_INFO),
};

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
	[REGULATORY_AND_NVM_GROUP] =
		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};

/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
#ifdef CONFIG_PM
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
#endif

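/*
 * Look up the TX power backoff for this platform: walk the per-device
 * backoff table and return the entry matching the default power limit
 * reported by ACPI, or 0 if no table entry applies.
 */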
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
	u64 dflt_pwr_limit;

	if (!backoff)
		return 0;

	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);

	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;

		backoff++;
	}

	return 0;
}

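/*
 * Delayed work that re-enables TX for all stations on the vif that was
 * blocked during a channel switch, and clears csa_tx_blocked_vif.
 */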
static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
	struct ieee80211_vif *tx_blocked_vif;
	struct iwl_mvm_vif *mvmvif;

	mutex_lock(&mvm->mutex);

	tx_blocked_vif =
		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					  lockdep_is_held(&mvm->mutex));

	if (!tx_blocked_vif)
		goto unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
unlock:
	mutex_unlock(&mvm->mutex);
}

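/*
 * Hooks used by the firmware runtime (fwrt) code: grab a reference and
 * the mvm mutex around debug dump collection, and report whether the
 * firmware is currently running.
 */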
static int iwl_mvm_fwrt_dump_start(void *ctx)
{
	struct iwl_mvm *mvm = ctx;
	int ret;

	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
	if (ret)
		return ret;

	mutex_lock(&mvm->mutex);

	return 0;
}

static void iwl_mvm_fwrt_dump_end(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}

static bool iwl_mvm_fwrt_fw_running(void *ctx)
{
	return iwl_mvm_firmware_running(ctx);
}

static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
	.dump_start = iwl_mvm_fwrt_dump_start,
	.dump_end = iwl_mvm_fwrt_dump_end,
	.fw_running = iwl_mvm_fwrt_fw_running,
};

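/*
 * Op mode entry point: allocate the mac80211 hw and mvm data, configure
 * the transport layer, run the INIT firmware once and register the
 * device with mac80211 and debugfs.
 */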
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int err, scan_size;
	u32 min_backoff;
	enum iwl_amsdu_size rb_size_default;

	/*
	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	if (cfg->max_rx_agg_size)
		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;

	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
			    dbgfs_dir);

	mvm->init_status = 0;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
		trans->rx_mpdu_cmd_hdr_size =
			(trans->cfg->device_family >=
			 IWL_DEVICE_FAMILY_22560) ?
			sizeof(struct iwl_rx_mpdu_desc) :
			IWL_RX_DESC_SIZE_V1;
	} else {
		op_mode->ops = &iwl_mvm_ops;
		trans->rx_mpdu_cmd_hdr_size =
			sizeof(struct iwl_rx_mpdu_res_start);

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}

	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;

	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
	mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;

	mvm->sf_state = SF_UNINIT;
	if (iwl_mvm_has_unified_ucode(mvm))
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
	else
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
	mvm->drop_bcn_ap_mode = true;

	mutex_init(&mvm->mutex);
	mutex_init(&mvm->d0i3_suspend_mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
	spin_lock_init(&mvm->queue_info_lock);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
#ifdef CONFIG_PM
	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
#endif
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);

	spin_lock_init(&mvm->d0i3_tx_lock);
	spin_lock_init(&mvm->refs_lock);
	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);
	init_waitqueue_head(&mvm->rx_sync_waitq);

	atomic_set(&mvm->queue_sync_counter, 0);

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	spin_lock_init(&mvm->tcm.lock);
	INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	mvm->tcm.uapsd_nonagg_ts = jiffies;

	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);

	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		rb_size_default = IWL_AMSDU_2K;
	else
		rb_size_default = IWL_AMSDU_4K;

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		trans_cfg.rx_buf_size = rb_size_default;
		break;
	case IWL_AMSDU_4K:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_8K:
		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
		break;
	case IWL_AMSDU_12K:
		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
		break;
	default:
		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
		       iwlwifi_mod_params.amsdu_size);
		trans_cfg.rx_buf_size = rb_size_default;
	}

	trans->wide_cmd_header = true;
	trans_cfg.bc_table_dword =
		mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;

	trans_cfg.command_groups = iwl_mvm_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);

	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
	trans_cfg.scd_set_active = true;

	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
					  driver_data[2]);

	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;

	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
	trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
	       sizeof(trans->dbg_conf_tlv));
	trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
	trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;

	trans->iml = mvm->fw->iml;
	trans->iml_len = mvm->fw->iml_len;

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->cfg->name, mvm->trans->hw_rev);

	if (iwlwifi_mod_params.nvm_file)
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");

	err = iwl_trans_start_hw(mvm->trans);
	if (err)
		goto out_free;

	mutex_lock(&mvm->mutex);
	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
	err = iwl_run_init_mvm_ucode(mvm, true);
	if (!iwlmvm_mod_params.init_dbg || !err)
		iwl_mvm_stop_device(mvm);
	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
	mutex_unlock(&mvm->mutex);
	if (err < 0) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
		goto out_free;
	}

	scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

	err = iwl_mvm_mac_setup_register(mvm);
	if (err)
		goto out_free;
	mvm->hw_registered = true;

	min_backoff = iwl_mvm_min_backoff(mvm);
	iwl_mvm_thermal_initialize(mvm, min_backoff);

	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
	if (err)
		goto out_unregister;

	if (!iwl_mvm_has_new_rx_stats_api(mvm))
		memset(&mvm->rx_stats_v3, 0,
		       sizeof(struct mvm_statistics_rx_v3));
	else
		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

	/* The transport always starts with a taken reference, we can
	 * release it now if d0i3 is supported */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_unref(mvm->trans);

	iwl_mvm_tof_init(mvm);

	return op_mode;

 out_unregister:
	if (iwlmvm_mod_params.init_dbg)
		return op_mode;

	ieee80211_unregister_hw(mvm->hw);
	mvm->hw_registered = false;
	iwl_mvm_leds_exit(mvm);
	iwl_mvm_thermal_exit(mvm);
 out_free:
	iwl_fw_flush_dump(&mvm->fwrt);

	if (iwlmvm_mod_params.init_dbg)
		return op_mode;
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	iwl_trans_op_mode_leave(trans);

	ieee80211_free_hw(mvm->hw);
	return NULL;
}

static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	/* If d0i3 is supported, we have released the reference that
	 * the transport started with, so we should take it back now
	 * that we are leaving.
	 */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_ref(mvm->trans);

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_thermal_exit(mvm);

	if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
		ieee80211_unregister_hw(mvm->hw);
		mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
	}

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
	kfree(mvm->d3_resume_sram);
#endif
	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	kfree(mvm->nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	cancel_delayed_work_sync(&mvm->tcm.work);

	iwl_mvm_tof_clean(mvm);

	mutex_destroy(&mvm->mutex);
	mutex_destroy(&mvm->d0i3_suspend_mutex);

	ieee80211_free_hw(mvm->hw);
}

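/*
 * Entry queued for notifications whose handlers must not run in the Rx
 * path (e.g. because they need mvm->mutex); iwl_mvm_async_handlers_wk
 * drains the list and runs them.
 */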
struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
	LIST_HEAD(local_list);

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this list,
	 * add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_lock(&mvm->mutex);
		entry->fn(mvm, &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_unlock(&mvm->mutex);
		kfree(entry);
	}
}

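/*
 * If a FW_DBG_TRIGGER_FW_NOTIF trigger is configured for this command,
 * kick off a firmware debug data collection.
 */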
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
					    struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_FW_NOTIF);
	if (!trig)
		return;

	cmds_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)
			break;

		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"CMD 0x%02x.%02x received",
					pkt->hdr.group_id, pkt->hdr.cmd);
		break;
	}
}

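/*
 * Common Rx dispatch: feed notification waiters first, then look up the
 * handler for this command.  SYNC handlers run here in the Rx path,
 * ASYNC ones are queued for the async handlers worker.
 */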
static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	int i;

	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		entry->context = rx_h->context;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		break;
	}
}

static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
		       struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
			  struct napi_struct *napi,
			  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

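/*
 * Stop/wake helpers for the mac80211 queues in the given bitmap; the
 * per-queue stop counter lets stop/wake calls nest when several HW
 * queues map to the same mac80211 queue.
 */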
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
	int q;

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "mac80211 %d already stopped\n", q);
			continue;
		}

		ieee80211_stop_queue(mvm->hw, q);
	}
}

static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
			     const struct iwl_device_cmd *cmd)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	/*
	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
	 * commands that need to block the Tx queues.
	 */
	iwl_trans_block_txq_ptrs(mvm->trans, false);
}

static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->hw_queue_to_mac80211[hw_queue];
	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_mvm_stop_mac_queues(mvm, mq);
}

void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
	int q;

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "mac80211 %d still stopped\n", q);
			continue;
		}

		ieee80211_wake_queue(mvm->hw, q);
	}
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->hw_queue_to_mac80211[hw_queue];
	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_mvm_start_mac_queues(mvm, mq);
}

Johannes Berg6ad04352017-04-25 10:21:18 +02001131static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
1132{
1133 bool state = iwl_mvm_is_radio_killed(mvm);
1134
1135 if (state)
1136 wake_up(&mvm->rx_sync_waitq);
1137
1138 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
1139}
1140
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001141void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
1142{
1143 if (state)
1144 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1145 else
1146 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1147
Johannes Berg6ad04352017-04-25 10:21:18 +02001148 iwl_mvm_set_rfkill_state(mvm);
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001149}
1150
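/*
 * op_mode hw_rf_kill handler: record the new rfkill state, report it via
 * wiphy_rfkill_set_hw_state(), and abort the notification wait if we are
 * in the middle of calibrations. Returns true when the transport should
 * stop the device.
 */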
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	bool calibrating = READ_ONCE(mvm->calibrating);

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

	iwl_mvm_set_rfkill_state(mvm);

	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
	if (calibrating)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
	return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
}

static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_mvm_reprobe_wk(struct work_struct *wk)
{
	struct iwl_mvm_reprobe *reprobe;

	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	kfree(reprobe);
	module_put(THIS_MODULE);
}

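/*
 * Common error-recovery path: abort anything waiting on firmware
 * notifications, report an aborted scan to mac80211, and then decide
 * between collecting debug data only, reprobing the device (if the error
 * hit while already restarting), or requesting a mac80211 hw restart.
 */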
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done which
	 * is not a problem. It is necessary to abort any os scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart) scan status will stay busy.
	 */
	iwl_mvm_report_scan_aborted(mvm);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (!mvm->fw_restart && fw_error) {
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
					NULL);
	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = mvm->trans->dev;
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
		   mvm->hw_registered &&
		   !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
		/* don't let the transport/FW power down */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

		if (fw_error && mvm->fw_restart > 0)
			mvm->fw_restart--;
		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
		ieee80211_restart_hw(mvm->hw);
	}
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
		iwl_mvm_dump_nic_error_log(mvm);

	iwl_mvm_nic_restart(mvm, true);
}

static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm, true);
}

#ifdef CONFIG_PM
struct iwl_d0i3_iter_data {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *connected_vif;
	u8 ap_sta_id;
	u8 vif_count;
	u8 offloading_tid;
	bool disable_offloading;
};

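/*
 * Returns true if protocol offloading must be disabled for this vif:
 * offloading needs a TID with no pending frames and no active aggregation,
 * otherwise the QoS sequence counters could be reused. If a usable TID is
 * found it is stored in iter_data->offloading_tid.
 */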
static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_d0i3_iter_data *iter_data)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;
	u32 available_tids = 0;
	u8 tid;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
		    mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
		return false;

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
	if (!mvmsta)
		return false;

	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/*
		 * in case of pending tx packets, don't use this tid
		 * for offloading in order to prevent reuse of the same
		 * qos seq counters.
		 */
		if (iwl_mvm_tid_queued(mvm, tid_data))
			continue;

		if (tid_data->state != IWL_AGG_OFF)
			continue;

		available_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * disallow protocol offloading if we have no available tid
	 * (with no pending frames and no active aggregation,
	 * as we don't handle "holes" properly - the scheduler needs the
	 * frame's seq number and TFD index to match)
	 */
	if (!available_tids)
		return true;

	/* for simplicity, just use the first available tid */
	iter_data->offloading_tid = ffs(available_tids) - 1;
	return false;
}

static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_d0i3_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;

	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	/*
	 * in case of pending tx packets or active aggregations,
	 * avoid offloading features in order to prevent reuse of
	 * the same qos seq counters.
	 */
	if (iwl_mvm_disallow_offloading(mvm, vif, data))
		data->disable_offloading = true;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
				   false, flags);

	/*
	 * on init/association, mvm already configures POWER_TABLE_CMD
	 * and REPLY_MCAST_FILTER_CMD, so currently don't
	 * reconfigure them (we might want to use different
	 * params later on, though).
	 */
	data->ap_sta_id = mvmvif->ap_sta_id;
	data->vif_count++;

	/*
	 * no new commands can be sent at this stage, so it's safe
	 * to save the vif pointer during d0i3 entrance.
	 */
	data->connected_vif = vif;
}

static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
				    struct iwl_wowlan_config_cmd *cmd,
				    struct iwl_d0i3_iter_data *iter_data)
{
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
		return;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
	cmd->offloading_tid = iter_data->offloading_tid;
	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
		ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
	/*
	 * The d0i3 uCode takes care of the nonqos counters,
	 * so configure only the qos seq ones.
	 */
	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
	rcu_read_unlock();
}

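/*
 * Enter D0i3: iterate over the active interfaces to configure power mode
 * and protocol offloading, flush the Tx queues, and send the WOWLAN and D3
 * configuration commands to the firmware. Returns 1 (without entering
 * D0i3) if a reference is still held.
 */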
int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
	int ret;
	struct iwl_d0i3_iter_data d0i3_iter_data = {
		.mvm = mvm,
	};
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
					     IWL_WOWLAN_WAKEUP_LINK_CHANGE),
	};
	struct iwl_d3_manager_config d3_cfg_cmd = {
		.min_sleep_time = cpu_to_le32(1000),
		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
	};

	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");

	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
		return -EINVAL;

	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	/*
	 * iwl_mvm_ref_sync takes a reference before checking the flag,
	 * so by checking there is no held reference we prevent a state
	 * in which iwl_mvm_ref_sync continues successfully while we
	 * configure the firmware to enter d0i3.
	 */
	if (iwl_mvm_ref_taken(mvm)) {
		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
		wake_up(&mvm->d0i3_exit_waitq);
		return 1;
	}

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_enter_d0i3_iterator,
						   &d0i3_iter_data);
	if (d0i3_iter_data.vif_count == 1) {
		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
	} else {
		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
		mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
		mvm->d0i3_offloading = false;
	}

	iwl_mvm_pause_tcm(mvm, true);
	/* make sure we have no running tx while configuring the seqno */
	synchronize_net();

	/* Flush the hw queues, in case something got queued during entry */
	/* TODO new tx api */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
	} else {
		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
					    flags);
		if (ret)
			return ret;
	}

	/* configure wowlan configuration only if needed */
	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
		/* wake on beacons only if beacon storing isn't supported */
		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_BEACON_STORING))
			wowlan_config_cmd.wakeup_filter |=
				cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);

		iwl_mvm_wowlan_config_key_params(mvm,
						 d0i3_iter_data.connected_vif,
						 true, flags);

		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
					&d0i3_iter_data);

		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
					   sizeof(wowlan_config_cmd),
					   &wowlan_config_cmd);
		if (ret)
			return ret;
	}

	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
				    flags | CMD_MAKE_TRANS_IDLE,
				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
}

static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;

	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}

struct iwl_mvm_d0i3_exit_work_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status *status;
	u32 wakeup_reasons;
};

static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 reasons = data->wakeup_reasons;

	/* consider only the relevant station interface */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
		return;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
		ieee80211_beacon_loss(vif);
	else
		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
}

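/*
 * Re-enable Tx after D0i3 exit: update the QoS sequence numbers from the
 * values the firmware used while offloading (if enabled), then re-enqueue
 * or drop every frame held on the d0i3_tx queue and wake the mac80211
 * queues if anything was pending.
 */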
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/* firmware stores last-used one, we store next one */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}

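/*
 * Deferred part of the D0i3 exit flow: read the wakeup status from the
 * firmware, let the relevant station interface react to the wakeup reason
 * (connection loss, beacon loss or key update) and re-enable Tx.
 */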
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
		.mvm = mvm,
	};

	struct iwl_wowlan_status *status;
	u32 wakeup_reasons = 0;
	__le16 *qos_seq = NULL;

	mutex_lock(&mvm->mutex);

	status = iwl_mvm_send_wowlan_get_status(mvm);
	if (IS_ERR_OR_NULL(status)) {
		/* set to NULL so we don't need to check before kfree'ing */
		status = NULL;
		goto out;
	}

	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
	qos_seq = status->qos_seq_ctr;

	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);

	iter_data.wakeup_reasons = wakeup_reasons;
	iter_data.status = status;
	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_d0i3_exit_work_iter,
					    &iter_data);
out:
	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);

	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);

	/* qos_seq might point inside resp_pkt, so free it only now */
	kfree(status);

	/* the FW might have updated the regdomain */
	iwl_mvm_update_changed_regdom(mvm);

	iwl_mvm_resume_tcm(mvm);
	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
	mutex_unlock(&mvm->mutex);
}

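/*
 * Start exiting D0i3: unless the exit is deferred until resume, send
 * D0I3_END_CMD, restore the per-vif power configuration and schedule
 * d0i3_exit_work to complete the flow.
 */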
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
		    CMD_WAKE_UP_TRANS;
	int ret;

	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");

	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
		return -EINVAL;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);
		return 0;
	}
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
	if (ret)
		goto out;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_exit_d0i3_iterator,
						   mvm);
out:
	schedule_work(&mvm->d0i3_exit_work);
	return ret;
}

int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
	return _iwl_mvm_exit_d0i3(mvm);
}

#define IWL_MVM_D0I3_OPS					\
	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
	.exit_d0i3 = iwl_mvm_exit_d0i3,
#else /* CONFIG_PM */
#define IWL_MVM_D0I3_OPS
#endif /* CONFIG_PM */

#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
	.async_cb = iwl_mvm_async_cb,				\
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
	IWL_MVM_D0I3_OPS					\
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop

static const struct iwl_op_mode_ops iwl_mvm_ops = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};

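/*
 * Rx handler for RSS queues (queue > 0): only frame-release, RX-queue-sync
 * notifications and received MPDUs are handled here; everything else is
 * delivered on the default queue via iwl_mvm_rx_mq().
 */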
static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}

static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
};