| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1 | /****************************************************************************** | 
 | 2 |  * | 
 | 3 |  * This file is provided under a dual BSD/GPLv2 license.  When using or | 
 | 4 |  * redistributing this file, you may do so under either license. | 
 | 5 |  * | 
 | 6 |  * GPL LICENSE SUMMARY | 
 | 7 |  * | 
| Emmanuel Grumbach | 51368bf | 2013-12-30 13:15:54 +0200 | [diff] [blame] | 8 |  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 
| Eran Harary | 4fb0628 | 2015-04-19 10:05:18 +0300 | [diff] [blame] | 9 |  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 
| Liad Kaufman | de8ba41 | 2017-03-16 13:00:59 +0200 | [diff] [blame] | 10 |  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 
| Shaul Triebitz | 8745f12 | 2018-01-11 16:18:46 +0200 | [diff] [blame] | 11 |  * Copyright(c) 2018        Intel Corporation | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 12 |  * | 
 | 13 |  * This program is free software; you can redistribute it and/or modify | 
 | 14 |  * it under the terms of version 2 of the GNU General Public License as | 
 | 15 |  * published by the Free Software Foundation. | 
 | 16 |  * | 
 | 17 |  * This program is distributed in the hope that it will be useful, but | 
 | 18 |  * WITHOUT ANY WARRANTY; without even the implied warranty of | 
 | 19 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
 | 20 |  * General Public License for more details. | 
 | 21 |  * | 
 | 22 |  * You should have received a copy of the GNU General Public License | 
 | 23 |  * along with this program; if not, write to the Free Software | 
 | 24 |  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | 
 | 25 |  * USA | 
 | 26 |  * | 
 | 27 |  * The full GNU General Public License is included in this distribution | 
| Emmanuel Grumbach | 410dc5a | 2013-02-18 09:22:28 +0200 | [diff] [blame] | 28 |  * in the file called COPYING. | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 29 |  * | 
 | 30 |  * Contact Information: | 
| Emmanuel Grumbach | cb2f827 | 2015-11-17 15:39:56 +0200 | [diff] [blame] | 31 |  *  Intel Linux Wireless <linuxwifi@intel.com> | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 32 |  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | 
 | 33 |  * | 
 | 34 |  * BSD LICENSE | 
 | 35 |  * | 
| Emmanuel Grumbach | 51368bf | 2013-12-30 13:15:54 +0200 | [diff] [blame] | 36 |  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 
| Eran Harary | 4fb0628 | 2015-04-19 10:05:18 +0300 | [diff] [blame] | 37 |  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 
| Liad Kaufman | de8ba41 | 2017-03-16 13:00:59 +0200 | [diff] [blame] | 38 |  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 
| Shaul Triebitz | 8745f12 | 2018-01-11 16:18:46 +0200 | [diff] [blame] | 39 |  * Copyright(c) 2018        Intel Corporation | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 40 |  * All rights reserved. | 
 | 41 |  * | 
 | 42 |  * Redistribution and use in source and binary forms, with or without | 
 | 43 |  * modification, are permitted provided that the following conditions | 
 | 44 |  * are met: | 
 | 45 |  * | 
 | 46 |  *  * Redistributions of source code must retain the above copyright | 
 | 47 |  *    notice, this list of conditions and the following disclaimer. | 
 | 48 |  *  * Redistributions in binary form must reproduce the above copyright | 
 | 49 |  *    notice, this list of conditions and the following disclaimer in | 
 | 50 |  *    the documentation and/or other materials provided with the | 
 | 51 |  *    distribution. | 
 | 52 |  *  * Neither the name Intel Corporation nor the names of its | 
 | 53 |  *    contributors may be used to endorse or promote products derived | 
 | 54 |  *    from this software without specific prior written permission. | 
 | 55 |  * | 
 | 56 |  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 
 | 57 |  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 
 | 58 |  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 
 | 59 |  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 
 | 60 |  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 
 | 61 |  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 
 | 62 |  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 
 | 63 |  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 
 | 64 |  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 
 | 65 |  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 
 | 66 |  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 
 | 67 |  * | 
 | 68 |  *****************************************************************************/ | 
 | 69 | #include <linux/module.h> | 
| Emmanuel Grumbach | 1bd3cbc | 2014-03-18 21:15:06 +0200 | [diff] [blame] | 70 | #include <linux/vmalloc.h> | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 71 | #include <net/mac80211.h> | 
 | 72 |  | 
| Johannes Berg | 9fca9d5 | 2017-06-01 10:32:17 +0200 | [diff] [blame] | 73 | #include "fw/notif-wait.h" | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 74 | #include "iwl-trans.h" | 
 | 75 | #include "iwl-op-mode.h" | 
| Johannes Berg | d962f9b | 2017-06-01 10:22:09 +0200 | [diff] [blame] | 76 | #include "fw/img.h" | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 77 | #include "iwl-debug.h" | 
 | 78 | #include "iwl-drv.h" | 
 | 79 | #include "iwl-modparams.h" | 
 | 80 | #include "mvm.h" | 
 | 81 | #include "iwl-phy-db.h" | 
 | 82 | #include "iwl-eeprom-parse.h" | 
 | 83 | #include "iwl-csr.h" | 
 | 84 | #include "iwl-io.h" | 
 | 85 | #include "iwl-prph.h" | 
 | 86 | #include "rs.h" | 
| Johannes Berg | d172a5e | 2017-06-02 15:15:53 +0200 | [diff] [blame] | 87 | #include "fw/api/scan.h" | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 88 | #include "time-event.h" | 
| Sharon Dvir | 39bdb17 | 2015-10-15 18:18:09 +0300 | [diff] [blame] | 89 | #include "fw-api.h" | 
| Johannes Berg | d172a5e | 2017-06-02 15:15:53 +0200 | [diff] [blame] | 90 | #include "fw/api/scan.h" | 
| Luca Coelho | f2abcfa | 2017-09-28 15:29:27 +0300 | [diff] [blame] | 91 | #include "fw/acpi.h" | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 92 |  | 
/* Module identification exported via modinfo. */
#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

/*
 * Forward declarations of the op_mode ops registered with the transport;
 * iwl_mvm_ops_mq is the multi-RX-queue variant. Being static, both must
 * be defined later in this translation unit.
 */
static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 100 |  | 
/*
 * Defaults for the iwlmvm module parameters; overridable at load time
 * via the module_param_named() declarations below.
 */
struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	.tfd_q_hang_detect = true
	/* rest of fields are 0 by default */
};
 | 106 |  | 
 | 107 | module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO); | 
 | 108 | MODULE_PARM_DESC(init_dbg, | 
 | 109 | 		 "set to true to debug an ASSERT in INIT fw (default: false"); | 
 | 110 | module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO); | 
 | 111 | MODULE_PARM_DESC(power_scheme, | 
 | 112 | 		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); | 
| Emmanuel Grumbach | ce71c2f | 2015-01-11 17:19:39 +0200 | [diff] [blame] | 113 | module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect, | 
 | 114 | 		   bool, S_IRUGO); | 
 | 115 | MODULE_PARM_DESC(tfd_q_hang_detect, | 
 | 116 | 		 "TFD queues hang detection (default: true"); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 117 |  | 
 | 118 | /* | 
 | 119 |  * module init and exit functions | 
 | 120 |  */ | 
 | 121 | static int __init iwl_mvm_init(void) | 
 | 122 | { | 
 | 123 | 	int ret; | 
 | 124 |  | 
 | 125 | 	ret = iwl_mvm_rate_control_register(); | 
 | 126 | 	if (ret) { | 
 | 127 | 		pr_err("Unable to register rate control algorithm: %d\n", ret); | 
 | 128 | 		return ret; | 
 | 129 | 	} | 
 | 130 |  | 
 | 131 | 	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops); | 
| Gregory Greenman | 9f66a39 | 2017-11-05 18:49:48 +0200 | [diff] [blame] | 132 | 	if (ret) | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 133 | 		pr_err("Unable to register MVM op_mode: %d\n", ret); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 134 |  | 
 | 135 | 	return ret; | 
 | 136 | } | 
 | 137 | module_init(iwl_mvm_init); | 
 | 138 |  | 
/* Module teardown: undo iwl_mvm_init() registrations in reverse order. */
static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);
 | 145 |  | 
/*
 * iwl_mvm_nic_config - mirror SKU/radio configuration into the NIC's CSR
 *
 * Extracts the radio type/step/dash fields from the firmware PHY config
 * and combines them with the MAC step/dash from the HW revision, then
 * writes the result into CSR_HW_IF_CONFIG_REG under a mask covering all
 * of those fields. Finally applies an APMG power-off workaround on
 * devices where APMG is supported.
 */
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	/* Unpack the radio configuration from the FW PHY config word. */
	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	/* Sanity: the radio type must fit inside its register field. */
	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of the other HW, but unsetting
	 * them (such as the 7260) causes automatic tests to fail on seemingly
	 * unrelated errors. Need to further investigate this, but for now
	 * we'll separate cases.
	 */
	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	/* Read-modify-write: only the masked bits are affected. */
	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
 | 208 |  | 
/**
 * enum iwl_rx_handler_context - context for Rx handler
 * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
 *	which can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED: If the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 */
enum iwl_rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
};

/**
 * struct iwl_rx_handlers - handler for FW notification
 * @cmd_id: command id
 * @context: see &iwl_rx_handler_context
 * @fn: the function is called when notification is received
 */
struct iwl_rx_handlers {
	u16 cmd_id;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
 | 236 |  | 
/*
 * Helpers to build entries of iwl_mvm_rx_handlers[]: RX_HANDLER() takes a
 * legacy command id, RX_HANDLER_GRP() a (group, command) wide id.
 * Both are #undef'ed right after the table.
 */
#define RX_HANDLER(_cmd_id, _fn, _context)	\
	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 241 |  | 
/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can be one from three contexts, see &iwl_rx_handler_context
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
		   RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC),

	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
		   RX_HANDLER_SYNC),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
		   RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),

	RX_HANDLER(SCAN_ITERATION_COMPLETE,
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
		   iwl_mvm_rx_lmac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
		   RX_HANDLER_SYNC),
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),

	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
		   RX_HANDLER_SYNC),

	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
		   RX_HANDLER_SYNC),

	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),

	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
		   RX_HANDLER_SYNC),
	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 315 |  | 
/* Human-readable names of LEGACY/LONG group host commands, for debug output.
 *
 * Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(MVM_ALIVE),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(ECHO_CMD),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(DBG_CFG),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(TOF_CMD),
	HCMD_NAME(TOF_NOTIFICATION),
	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(ADD_STA),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(FW_GET_ITEM_CMD),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(WEP_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
	HCMD_NAME(LEDS_CMD),
	HCMD_NAME(LQ_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
	HCMD_NAME(PHY_DB_CMD),
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(DC2DC_CONFIG_CMD),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(CARD_STATE_NOTIFICATION),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(RSS_CONFIG_CMD),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(FRAME_RELEASE),
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(BCAST_FILTER_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(OFFLOADS_QUERY_CMD),
	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
};
| Sharon Dvir | 39bdb17 | 2015-10-15 18:18:09 +0300 | [diff] [blame] | 413 |  | 
/* Command names of the SYSTEM_GROUP, for debug output.
 *
 * Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
	HCMD_NAME(SHARED_MEM_CFG_CMD),
	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
};
 | 421 |  | 
/* Command names of the MAC_CONF_GROUP, for debug output.
 *
 * Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
	HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};
 | 428 |  | 
/* Command names of the PHY_OPS_GROUP, for debug output.
 *
 * Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(CTDP_CONFIG_CMD),
	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
	HCMD_NAME(GEO_TX_POWER_LIMIT),
	HCMD_NAME(CT_KILL_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
 | 440 |  | 
/* Command names of the DATA_PATH_GROUP, for debug output.
 *
 * Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
	HCMD_NAME(DQA_ENABLE_CMD),
	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
	HCMD_NAME(STA_PM_NOTIF),
	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
	HCMD_NAME(RX_QUEUES_NOTIFICATION),
};
 | 452 |  | 
 | 453 | /* Please keep this array *SORTED* by hex value. | 
 | 454 |  * Access is done through binary search | 
 | 455 |  */ | 
| Golan Ben-Ami | bdccdb8 | 2016-11-15 14:45:29 +0200 | [diff] [blame] | 456 | static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { | 
 | 457 | 	HCMD_NAME(MFU_ASSERT_DUMP_NTF), | 
 | 458 | }; | 
 | 459 |  | 
 | 460 | /* Please keep this array *SORTED* by hex value. | 
 | 461 |  * Access is done through binary search | 
 | 462 |  */ | 
| Sara Sharon | 0db056d | 2015-12-29 11:07:15 +0200 | [diff] [blame] | 463 | static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { | 
 | 464 | 	HCMD_NAME(STORED_BEACON_NTF), | 
 | 465 | }; | 
 | 466 |  | 
| Sara Sharon | 1f37065 | 2016-08-31 18:13:57 +0300 | [diff] [blame] | 467 | /* Please keep this array *SORTED* by hex value. | 
 | 468 |  * Access is done through binary search | 
 | 469 |  */ | 
/* Command/notification name table for REGULATORY_AND_NVM_GROUP. */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
	HCMD_NAME(NVM_ACCESS_COMPLETE),
	HCMD_NAME(NVM_GET_INFO),
};
 | 474 |  | 
| Sharon Dvir | 39bdb17 | 2015-10-15 18:18:09 +0300 | [diff] [blame] | 475 | static const struct iwl_hcmd_arr iwl_mvm_groups[] = { | 
 | 476 | 	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), | 
 | 477 | 	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), | 
| Golan Ben-Ami | 5b08641 | 2016-02-09 12:57:16 +0200 | [diff] [blame] | 478 | 	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names), | 
| Aviya Erenfeld | 0309826 | 2016-02-18 14:09:33 +0200 | [diff] [blame] | 479 | 	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), | 
| Sharon Dvir | 39bdb17 | 2015-10-15 18:18:09 +0300 | [diff] [blame] | 480 | 	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), | 
| Sara Sharon | e0d8fde | 2015-12-28 22:37:08 +0200 | [diff] [blame] | 481 | 	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), | 
| Sara Sharon | 0db056d | 2015-12-29 11:07:15 +0200 | [diff] [blame] | 482 | 	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), | 
| Sara Sharon | 1f37065 | 2016-08-31 18:13:57 +0300 | [diff] [blame] | 483 | 	[REGULATORY_AND_NVM_GROUP] = | 
 | 484 | 		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), | 
| Sharon Dvir | 39bdb17 | 2015-10-15 18:18:09 +0300 | [diff] [blame] | 485 | }; | 
 | 486 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 487 | /* this forward declaration can avoid to export the function */ | 
 | 488 | static void iwl_mvm_async_handlers_wk(struct work_struct *wk); | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 489 | static void iwl_mvm_d0i3_exit_work(struct work_struct *wk); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 490 |  | 
| Luca Coelho | f2abcfa | 2017-09-28 15:29:27 +0300 | [diff] [blame] | 491 | static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm) | 
| Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 492 | { | 
| Luca Coelho | f2abcfa | 2017-09-28 15:29:27 +0300 | [diff] [blame] | 493 | 	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs; | 
 | 494 | 	u64 dflt_pwr_limit; | 
| Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 495 |  | 
| Luca Coelho | f2abcfa | 2017-09-28 15:29:27 +0300 | [diff] [blame] | 496 | 	if (!backoff) | 
| Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 497 | 		return 0; | 
 | 498 |  | 
| Luca Coelho | f2abcfa | 2017-09-28 15:29:27 +0300 | [diff] [blame] | 499 | 	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev); | 
| Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 500 |  | 
| Luca Coelho | f2abcfa | 2017-09-28 15:29:27 +0300 | [diff] [blame] | 501 | 	while (backoff->pwr) { | 
 | 502 | 		if (dflt_pwr_limit >= backoff->pwr) | 
 | 503 | 			return backoff->backoff; | 
 | 504 |  | 
 | 505 | 		backoff++; | 
| Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 506 | 	} | 
 | 507 |  | 
 | 508 | 	return 0; | 
 | 509 | } | 
 | 510 |  | 
| Andrei Otcheretianski | d3a108a | 2016-02-28 17:12:21 +0200 | [diff] [blame] | 511 | static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) | 
 | 512 | { | 
 | 513 | 	struct iwl_mvm *mvm = | 
 | 514 | 		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work); | 
 | 515 | 	struct ieee80211_vif *tx_blocked_vif; | 
 | 516 | 	struct iwl_mvm_vif *mvmvif; | 
 | 517 |  | 
 | 518 | 	mutex_lock(&mvm->mutex); | 
 | 519 |  | 
 | 520 | 	tx_blocked_vif = | 
 | 521 | 		rcu_dereference_protected(mvm->csa_tx_blocked_vif, | 
 | 522 | 					  lockdep_is_held(&mvm->mutex)); | 
 | 523 |  | 
 | 524 | 	if (!tx_blocked_vif) | 
 | 525 | 		goto unlock; | 
 | 526 |  | 
 | 527 | 	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); | 
 | 528 | 	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); | 
 | 529 | 	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); | 
 | 530 | unlock: | 
 | 531 | 	mutex_unlock(&mvm->mutex); | 
 | 532 | } | 
 | 533 |  | 
| Johannes Berg | 7174beb | 2017-06-01 16:03:19 +0200 | [diff] [blame] | 534 | static int iwl_mvm_fwrt_dump_start(void *ctx) | 
 | 535 | { | 
 | 536 | 	struct iwl_mvm *mvm = ctx; | 
 | 537 | 	int ret; | 
 | 538 |  | 
 | 539 | 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT); | 
 | 540 | 	if (ret) | 
 | 541 | 		return ret; | 
 | 542 |  | 
 | 543 | 	mutex_lock(&mvm->mutex); | 
 | 544 |  | 
 | 545 | 	return 0; | 
 | 546 | } | 
 | 547 |  | 
/*
 * fw-runtime callback invoked once a firmware debug dump is done.
 * Releases the mutex and the d0i3 reference taken (and left held) by
 * iwl_mvm_fwrt_dump_start().
 */
static void iwl_mvm_fwrt_dump_end(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}
 | 556 |  | 
| Shaul Triebitz | 8745f12 | 2018-01-11 16:18:46 +0200 | [diff] [blame] | 557 | static bool iwl_mvm_fwrt_fw_running(void *ctx) | 
 | 558 | { | 
 | 559 | 	return iwl_mvm_firmware_running(ctx); | 
 | 560 | } | 
 | 561 |  | 
| Johannes Berg | 7174beb | 2017-06-01 16:03:19 +0200 | [diff] [blame] | 562 | static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { | 
 | 563 | 	.dump_start = iwl_mvm_fwrt_dump_start, | 
 | 564 | 	.dump_end = iwl_mvm_fwrt_dump_end, | 
| Shaul Triebitz | 8745f12 | 2018-01-11 16:18:46 +0200 | [diff] [blame] | 565 | 	.fw_running = iwl_mvm_fwrt_fw_running, | 
| Johannes Berg | 7174beb | 2017-06-01 16:03:19 +0200 | [diff] [blame] | 566 | }; | 
 | 567 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 568 | static struct iwl_op_mode * | 
 | 569 | iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, | 
 | 570 | 		      const struct iwl_fw *fw, struct dentry *dbgfs_dir) | 
 | 571 | { | 
 | 572 | 	struct ieee80211_hw *hw; | 
 | 573 | 	struct iwl_op_mode *op_mode; | 
 | 574 | 	struct iwl_mvm *mvm; | 
 | 575 | 	struct iwl_trans_config trans_cfg = {}; | 
 | 576 | 	static const u8 no_reclaim_cmds[] = { | 
 | 577 | 		TX_CMD, | 
 | 578 | 	}; | 
 | 579 | 	int err, scan_size; | 
| Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 580 | 	u32 min_backoff; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 581 |  | 
| Emmanuel Grumbach | c4d8327 | 2014-01-14 08:45:26 +0200 | [diff] [blame] | 582 | 	/* | 
 | 583 | 	 * We use IWL_MVM_STATION_COUNT to check the validity of the station | 
 | 584 | 	 * index all over the driver - check that its value corresponds to the | 
 | 585 | 	 * array size. | 
 | 586 | 	 */ | 
 | 587 | 	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT); | 
 | 588 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 589 | 	/******************************** | 
 | 590 | 	 * 1. Allocating and configuring HW data | 
 | 591 | 	 ********************************/ | 
 | 592 | 	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + | 
 | 593 | 				sizeof(struct iwl_mvm), | 
 | 594 | 				&iwl_mvm_hw_ops); | 
 | 595 | 	if (!hw) | 
 | 596 | 		return NULL; | 
 | 597 |  | 
| Oren Givon | 745160e | 2014-06-16 10:54:52 +0300 | [diff] [blame] | 598 | 	if (cfg->max_rx_agg_size) | 
 | 599 | 		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size; | 
 | 600 |  | 
| Gregory Greenman | 77d9673 | 2014-09-02 16:04:58 +0200 | [diff] [blame] | 601 | 	if (cfg->max_tx_agg_size) | 
 | 602 | 		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; | 
 | 603 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 604 | 	op_mode = hw->priv; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 605 |  | 
 | 606 | 	mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
 | 607 | 	mvm->dev = trans->dev; | 
 | 608 | 	mvm->trans = trans; | 
 | 609 | 	mvm->cfg = cfg; | 
 | 610 | 	mvm->fw = fw; | 
 | 611 | 	mvm->hw = hw; | 
 | 612 |  | 
| Mordechay Goodstein | 93b167c | 2017-09-26 11:31:55 +0000 | [diff] [blame] | 613 | 	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, | 
 | 614 | 			    dbgfs_dir); | 
| Johannes Berg | 235acb1 | 2017-06-01 12:10:32 +0200 | [diff] [blame] | 615 |  | 
| Liad Kaufman | de8ba41 | 2017-03-16 13:00:59 +0200 | [diff] [blame] | 616 | 	mvm->init_status = 0; | 
 | 617 |  | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 618 | 	if (iwl_mvm_has_new_rx_api(mvm)) { | 
 | 619 | 		op_mode->ops = &iwl_mvm_ops_mq; | 
| Sara Sharon | 25c2b22 | 2016-02-07 13:09:59 +0200 | [diff] [blame] | 620 | 		trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc); | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 621 | 	} else { | 
 | 622 | 		op_mode->ops = &iwl_mvm_ops; | 
| Sara Sharon | 25c2b22 | 2016-02-07 13:09:59 +0200 | [diff] [blame] | 623 | 		trans->rx_mpdu_cmd_hdr_size = | 
 | 624 | 			sizeof(struct iwl_rx_mpdu_res_start); | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 625 |  | 
 | 626 | 		if (WARN_ON(trans->num_rx_queues > 1)) | 
 | 627 | 			goto out_free; | 
 | 628 | 	} | 
 | 629 |  | 
| Johannes Berg | 3b37f4c | 2017-05-30 16:45:31 +0200 | [diff] [blame] | 630 | 	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; | 
| Eran Harary | 291aa7c | 2013-07-03 11:00:06 +0300 | [diff] [blame] | 631 |  | 
| Johannes Berg | c8f5470 | 2017-06-19 23:50:31 +0200 | [diff] [blame] | 632 | 	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; | 
| Emmanuel Grumbach | b13f43a | 2017-11-19 10:35:14 +0200 | [diff] [blame] | 633 | 	mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; | 
| Johannes Berg | c8f5470 | 2017-06-19 23:50:31 +0200 | [diff] [blame] | 634 | 	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; | 
 | 635 | 	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; | 
| Liad Kaufman | 28d0793 | 2015-09-01 16:36:25 +0300 | [diff] [blame] | 636 |  | 
| Lilach Edelstein | 1f3b0ff | 2013-10-06 13:03:32 +0200 | [diff] [blame] | 637 | 	mvm->sf_state = SF_UNINIT; | 
| Johannes Berg | 7d6222e2 | 2017-06-08 09:18:22 +0200 | [diff] [blame] | 638 | 	if (iwl_mvm_has_unified_ucode(mvm)) | 
| Johannes Berg | 702e975 | 2017-06-02 11:56:58 +0200 | [diff] [blame] | 639 | 		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); | 
| Sara Sharon | 1f37065 | 2016-08-31 18:13:57 +0300 | [diff] [blame] | 640 | 	else | 
| Johannes Berg | 702e975 | 2017-06-02 11:56:58 +0200 | [diff] [blame] | 641 | 		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); | 
| Andrei Otcheretianski | c89e333 | 2016-01-26 18:12:28 +0200 | [diff] [blame] | 642 | 	mvm->drop_bcn_ap_mode = true; | 
| Eytan Lifshitz | 19e737c | 2013-09-09 13:30:15 +0200 | [diff] [blame] | 643 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 644 | 	mutex_init(&mvm->mutex); | 
| Eliad Peller | d15a747 | 2014-03-27 18:53:12 +0200 | [diff] [blame] | 645 | 	mutex_init(&mvm->d0i3_suspend_mutex); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 646 | 	spin_lock_init(&mvm->async_handlers_lock); | 
 | 647 | 	INIT_LIST_HEAD(&mvm->time_event_list); | 
| Ariej Marjieh | b112889 | 2014-07-16 21:11:12 +0300 | [diff] [blame] | 648 | 	INIT_LIST_HEAD(&mvm->aux_roc_te_list); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 649 | 	INIT_LIST_HEAD(&mvm->async_handlers_list); | 
 | 650 | 	spin_lock_init(&mvm->time_event_lock); | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 651 | 	spin_lock_init(&mvm->queue_info_lock); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 652 |  | 
 | 653 | 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); | 
 | 654 | 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 655 | 	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); | 
| Arik Nemtsov | 1d3c3f6 | 2014-10-23 18:03:10 +0300 | [diff] [blame] | 656 | 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); | 
| Luca Coelho | 69e0464 | 2016-05-03 12:18:33 +0300 | [diff] [blame] | 657 | 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); | 
| Liad Kaufman | 24afba7 | 2015-07-28 18:56:08 +0300 | [diff] [blame] | 658 | 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 659 |  | 
| Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 660 | 	spin_lock_init(&mvm->d0i3_tx_lock); | 
| Eliad Peller | 576eeee | 2014-07-01 18:38:38 +0300 | [diff] [blame] | 661 | 	spin_lock_init(&mvm->refs_lock); | 
| Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 662 | 	skb_queue_head_init(&mvm->d0i3_tx); | 
 | 663 | 	init_waitqueue_head(&mvm->d0i3_exit_waitq); | 
| Sara Sharon | 3a732c6 | 2016-10-09 17:34:24 +0300 | [diff] [blame] | 664 | 	init_waitqueue_head(&mvm->rx_sync_waitq); | 
| Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 665 |  | 
| Sara Sharon | 0636b93 | 2016-02-18 14:21:12 +0200 | [diff] [blame] | 666 | 	atomic_set(&mvm->queue_sync_counter, 0); | 
 | 667 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 668 | 	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); | 
 | 669 |  | 
| Andrei Otcheretianski | d3a108a | 2016-02-28 17:12:21 +0200 | [diff] [blame] | 670 | 	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); | 
 | 671 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 672 | 	/* | 
 | 673 | 	 * Populate the state variables that the transport layer needs | 
 | 674 | 	 * to know about. | 
 | 675 | 	 */ | 
 | 676 | 	trans_cfg.op_mode = op_mode; | 
 | 677 | 	trans_cfg.no_reclaim_cmds = no_reclaim_cmds; | 
 | 678 | 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); | 
| Emmanuel Grumbach | 6c4fbcb | 2015-11-10 11:57:41 +0200 | [diff] [blame] | 679 | 	switch (iwlwifi_mod_params.amsdu_size) { | 
| Emmanuel Grumbach | 4bdd4df | 2016-04-07 16:44:42 +0300 | [diff] [blame] | 680 | 	case IWL_AMSDU_DEF: | 
| Emmanuel Grumbach | 6c4fbcb | 2015-11-10 11:57:41 +0200 | [diff] [blame] | 681 | 	case IWL_AMSDU_4K: | 
 | 682 | 		trans_cfg.rx_buf_size = IWL_AMSDU_4K; | 
 | 683 | 		break; | 
 | 684 | 	case IWL_AMSDU_8K: | 
 | 685 | 		trans_cfg.rx_buf_size = IWL_AMSDU_8K; | 
 | 686 | 		break; | 
 | 687 | 	case IWL_AMSDU_12K: | 
 | 688 | 		trans_cfg.rx_buf_size = IWL_AMSDU_12K; | 
 | 689 | 		break; | 
 | 690 | 	default: | 
 | 691 | 		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, | 
 | 692 | 		       iwlwifi_mod_params.amsdu_size); | 
 | 693 | 		trans_cfg.rx_buf_size = IWL_AMSDU_4K; | 
 | 694 | 	} | 
| Emmanuel Grumbach | 4bdd4df | 2016-04-07 16:44:42 +0300 | [diff] [blame] | 695 |  | 
 | 696 | 	/* the hardware splits the A-MSDU */ | 
 | 697 | 	if (mvm->cfg->mq_rx_supported) | 
 | 698 | 		trans_cfg.rx_buf_size = IWL_AMSDU_4K; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 699 |  | 
| Luca Coelho | 4b87e5a | 2016-09-12 16:03:30 +0300 | [diff] [blame] | 700 | 	trans->wide_cmd_header = true; | 
 | 701 | 	trans_cfg.bc_table_dword = true; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 702 |  | 
| Sharon Dvir | 39bdb17 | 2015-10-15 18:18:09 +0300 | [diff] [blame] | 703 | 	trans_cfg.command_groups = iwl_mvm_groups; | 
 | 704 | 	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 705 |  | 
| Johannes Berg | c8f5470 | 2017-06-19 23:50:31 +0200 | [diff] [blame] | 706 | 	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; | 
| Johannes Berg | b2d81db | 2014-08-01 20:48:25 +0200 | [diff] [blame] | 707 | 	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; | 
| Emmanuel Grumbach | 3a736bc | 2014-09-10 11:16:41 +0300 | [diff] [blame] | 708 | 	trans_cfg.scd_set_active = true; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 709 |  | 
| Johannes Berg | 21cb322 | 2016-06-21 13:11:48 +0200 | [diff] [blame] | 710 | 	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, | 
 | 711 | 					  driver_data[2]); | 
 | 712 |  | 
| Emmanuel Grumbach | 41837ca9 | 2015-10-21 09:00:07 +0300 | [diff] [blame] | 713 | 	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD; | 
| Liad Kaufman | b482176 | 2014-10-19 16:58:15 +0200 | [diff] [blame] | 714 |  | 
| Emmanuel Grumbach | 4cf677f | 2015-01-12 14:38:29 +0200 | [diff] [blame] | 715 | 	/* Set a short watchdog for the command queue */ | 
 | 716 | 	trans_cfg.cmd_q_wdg_timeout = | 
| Emmanuel Grumbach | 5d42e7b | 2015-03-19 20:04:51 +0200 | [diff] [blame] | 717 | 		iwl_mvm_get_wd_timeout(mvm, NULL, false, true); | 
| Emmanuel Grumbach | 4cf677f | 2015-01-12 14:38:29 +0200 | [diff] [blame] | 718 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 719 | 	snprintf(mvm->hw->wiphy->fw_version, | 
 | 720 | 		 sizeof(mvm->hw->wiphy->fw_version), | 
 | 721 | 		 "%s", fw->fw_version); | 
 | 722 |  | 
 | 723 | 	/* Configure transport layer */ | 
 | 724 | 	iwl_trans_configure(mvm->trans, &trans_cfg); | 
 | 725 |  | 
 | 726 | 	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; | 
| Liad Kaufman | 09e350f | 2014-11-17 11:41:07 +0200 | [diff] [blame] | 727 | 	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv; | 
 | 728 | 	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; | 
 | 729 | 	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, | 
 | 730 | 	       sizeof(trans->dbg_conf_tlv)); | 
| Emmanuel Grumbach | d2709ad | 2015-01-29 14:58:06 +0200 | [diff] [blame] | 731 | 	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 732 |  | 
 | 733 | 	/* set up notification wait support */ | 
 | 734 | 	iwl_notification_wait_init(&mvm->notif_wait); | 
 | 735 |  | 
 | 736 | 	/* Init phy db */ | 
 | 737 | 	mvm->phy_db = iwl_phy_db_init(trans); | 
 | 738 | 	if (!mvm->phy_db) { | 
 | 739 | 		IWL_ERR(mvm, "Cannot init phy_db\n"); | 
 | 740 | 		goto out_free; | 
 | 741 | 	} | 
 | 742 |  | 
 | 743 | 	IWL_INFO(mvm, "Detected %s, REV=0x%X\n", | 
 | 744 | 		 mvm->cfg->name, mvm->trans->hw_rev); | 
 | 745 |  | 
| Eran Harary | 4fb0628 | 2015-04-19 10:05:18 +0300 | [diff] [blame] | 746 | 	if (iwlwifi_mod_params.nvm_file) | 
| Eran Harary | e02a9d6 | 2014-05-07 12:27:10 +0300 | [diff] [blame] | 747 | 		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; | 
| Eran Harary | 4fb0628 | 2015-04-19 10:05:18 +0300 | [diff] [blame] | 748 | 	else | 
 | 749 | 		IWL_DEBUG_EEPROM(mvm->trans->dev, | 
 | 750 | 				 "working without external nvm file\n"); | 
| Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 751 |  | 
| Sara Sharon | 56f2929 | 2016-08-31 12:37:55 +0300 | [diff] [blame] | 752 | 	err = iwl_trans_start_hw(mvm->trans); | 
 | 753 | 	if (err) | 
| Eran Harary | 14b485f | 2014-04-23 10:46:09 +0300 | [diff] [blame] | 754 | 		goto out_free; | 
 | 755 |  | 
| Sara Sharon | 56f2929 | 2016-08-31 12:37:55 +0300 | [diff] [blame] | 756 | 	mutex_lock(&mvm->mutex); | 
 | 757 | 	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); | 
| Johannes Berg | 8c5f47b | 2017-02-20 17:47:04 +0100 | [diff] [blame] | 758 | 	err = iwl_run_init_mvm_ucode(mvm, true); | 
| Liad Kaufman | f474425 | 2017-11-23 10:29:04 +0200 | [diff] [blame] | 759 | 	if (!iwlmvm_mod_params.init_dbg || !err) | 
| Sara Sharon | 56f2929 | 2016-08-31 12:37:55 +0300 | [diff] [blame] | 760 | 		iwl_mvm_stop_device(mvm); | 
 | 761 | 	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); | 
 | 762 | 	mutex_unlock(&mvm->mutex); | 
| Liad Kaufman | de8ba41 | 2017-03-16 13:00:59 +0200 | [diff] [blame] | 763 | 	if (err < 0) { | 
| Sara Sharon | 56f2929 | 2016-08-31 12:37:55 +0300 | [diff] [blame] | 764 | 		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); | 
 | 765 | 		goto out_free; | 
| Eytan Lifshitz | 81a67e3 | 2013-09-11 12:39:18 +0200 | [diff] [blame] | 766 | 	} | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 767 |  | 
| David Spinadel | d249622 | 2014-05-20 12:46:37 +0300 | [diff] [blame] | 768 | 	scan_size = iwl_mvm_scan_size(mvm); | 
| David Spinadel | fb98be5 | 2014-05-04 12:51:10 +0300 | [diff] [blame] | 769 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 770 | 	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); | 
 | 771 | 	if (!mvm->scan_cmd) | 
 | 772 | 		goto out_free; | 
 | 773 |  | 
| Haim Dreyfuss | 5a4b2af | 2015-01-13 11:54:51 +0200 | [diff] [blame] | 774 | 	/* Set EBS as successful as long as not stated otherwise by the FW. */ | 
 | 775 | 	mvm->last_ebs_successful = true; | 
 | 776 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 777 | 	err = iwl_mvm_mac_setup_register(mvm); | 
 | 778 | 	if (err) | 
 | 779 | 		goto out_free; | 
| Sara Sharon | 1f37065 | 2016-08-31 18:13:57 +0300 | [diff] [blame] | 780 | 	mvm->hw_registered = true; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 781 |  | 
| Luca Coelho | f2abcfa | 2017-09-28 15:29:27 +0300 | [diff] [blame] | 782 | 	min_backoff = iwl_mvm_min_backoff(mvm); | 
| Chaya Rachel Ivgi | 04ddc2a | 2016-03-03 13:31:39 +0200 | [diff] [blame] | 783 | 	iwl_mvm_thermal_initialize(mvm, min_backoff); | 
 | 784 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 785 | 	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir); | 
 | 786 | 	if (err) | 
 | 787 | 		goto out_unregister; | 
 | 788 |  | 
| Liad Kaufman | 678d9b6 | 2017-05-18 18:00:49 +0300 | [diff] [blame] | 789 | 	if (!iwl_mvm_has_new_rx_stats_api(mvm)) | 
 | 790 | 		memset(&mvm->rx_stats_v3, 0, | 
 | 791 | 		       sizeof(struct mvm_statistics_rx_v3)); | 
 | 792 | 	else | 
 | 793 | 		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); | 
| Matti Gottlieb | 3848ab6 | 2013-07-30 15:29:37 +0300 | [diff] [blame] | 794 |  | 
| Luca Coelho | 33c85ea | 2016-02-22 15:44:13 +0200 | [diff] [blame] | 795 | 	/* The transport always starts with a taken reference, we can | 
 | 796 | 	 * release it now if d0i3 is supported */ | 
 | 797 | 	if (iwl_mvm_is_d0i3_supported(mvm)) | 
 | 798 | 		iwl_trans_unref(mvm->trans); | 
| Eliad Peller | 7498cf4 | 2014-01-16 17:10:44 +0200 | [diff] [blame] | 799 |  | 
| Gregory Greenman | ce79291 | 2015-06-02 18:06:16 +0300 | [diff] [blame] | 800 | 	iwl_mvm_tof_init(mvm); | 
 | 801 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 802 | 	return op_mode; | 
 | 803 |  | 
 | 804 |  out_unregister: | 
| Liad Kaufman | de8ba41 | 2017-03-16 13:00:59 +0200 | [diff] [blame] | 805 | 	if (iwlmvm_mod_params.init_dbg) | 
 | 806 | 		return op_mode; | 
 | 807 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 808 | 	ieee80211_unregister_hw(mvm->hw); | 
| Sara Sharon | 1f37065 | 2016-08-31 18:13:57 +0300 | [diff] [blame] | 809 | 	mvm->hw_registered = false; | 
| Eliad Peller | 91b0d11 | 2014-01-05 12:41:12 +0200 | [diff] [blame] | 810 | 	iwl_mvm_leds_exit(mvm); | 
| Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 811 | 	iwl_mvm_thermal_exit(mvm); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 812 |  out_free: | 
| Johannes Berg | 7174beb | 2017-06-01 16:03:19 +0200 | [diff] [blame] | 813 | 	iwl_fw_flush_dump(&mvm->fwrt); | 
| Liad Kaufman | de8ba41 | 2017-03-16 13:00:59 +0200 | [diff] [blame] | 814 |  | 
 | 815 | 	if (iwlmvm_mod_params.init_dbg) | 
 | 816 | 		return op_mode; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 817 | 	iwl_phy_db_free(mvm->phy_db); | 
 | 818 | 	kfree(mvm->scan_cmd); | 
| Sara Sharon | 56f2929 | 2016-08-31 12:37:55 +0300 | [diff] [blame] | 819 | 	iwl_trans_op_mode_leave(trans); | 
 | 820 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 821 | 	ieee80211_free_hw(mvm->hw); | 
 | 822 | 	return NULL; | 
 | 823 | } | 
 | 824 |  | 
/*
 * Tear down the MVM op-mode: re-take the transport reference released
 * for d0i3, unwind LEDs/thermal/mac80211 registration, leave the
 * transport, and free all memory owned by the mvm (scan command,
 * filters, phy db, NVM sections). Mirrors iwl_op_mode_mvm_start().
 */
static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	/* If d0i3 is supported, we have released the reference that
	 * the transport started with, so we should take it back now
	 * that we are leaving.
	 */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_ref(mvm->trans);

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_thermal_exit(mvm);

	/* only unregister from mac80211 if registration completed */
	if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
		ieee80211_unregister_hw(mvm->hw);
		mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
	}

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
	kfree(mvm->d3_resume_sram);
#endif
	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	kfree(mvm->nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	iwl_mvm_tof_clean(mvm);

	mutex_destroy(&mvm->mutex);
	mutex_destroy(&mvm->d0i3_suspend_mutex);

	ieee80211_free_hw(mvm->hw);
}
 | 869 |  | 
/*
 * One deferred Rx notification, queued on mvm->async_handlers_list and
 * processed later by iwl_mvm_async_handlers_wk().
 */
struct iwl_async_handler_entry {
	struct list_head list;		/* link on mvm->async_handlers_list */
	struct iwl_rx_cmd_buffer rxb;	/* Rx buffer; freed after fn runs */
	enum iwl_rx_handler_context context; /* whether fn needs mvm->mutex */
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
 | 876 |  | 
 | 877 | void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) | 
 | 878 | { | 
 | 879 | 	struct iwl_async_handler_entry *entry, *tmp; | 
 | 880 |  | 
 | 881 | 	spin_lock_bh(&mvm->async_handlers_lock); | 
 | 882 | 	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { | 
 | 883 | 		iwl_free_rxb(&entry->rxb); | 
 | 884 | 		list_del(&entry->list); | 
 | 885 | 		kfree(entry); | 
 | 886 | 	} | 
 | 887 | 	spin_unlock_bh(&mvm->async_handlers_lock); | 
 | 888 | } | 
 | 889 |  | 
/*
 * Worker that runs the Rx handlers which could not run in the Rx path
 * itself: splices the shared pending list into a local one under the
 * spinlock, then invokes each handler — taking mvm->mutex around it
 * only for handlers registered as RX_HANDLER_ASYNC_LOCKED — and frees
 * the entry and its Rx buffer.
 */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
	LIST_HEAD(local_list);

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this list,
	 * add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_lock(&mvm->mutex);
		entry->fn(mvm, &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_unlock(&mvm->mutex);
		kfree(entry);
	}
}
 | 918 |  | 
| Emmanuel Grumbach | 917f39b | 2015-02-10 10:49:20 +0200 | [diff] [blame] | 919 | static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, | 
 | 920 | 					    struct iwl_rx_packet *pkt) | 
 | 921 | { | 
 | 922 | 	struct iwl_fw_dbg_trigger_tlv *trig; | 
 | 923 | 	struct iwl_fw_dbg_trigger_cmd *cmds_trig; | 
| Emmanuel Grumbach | 917f39b | 2015-02-10 10:49:20 +0200 | [diff] [blame] | 924 | 	int i; | 
 | 925 |  | 
 | 926 | 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF)) | 
 | 927 | 		return; | 
 | 928 |  | 
 | 929 | 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); | 
 | 930 | 	cmds_trig = (void *)trig->data; | 
 | 931 |  | 
| Johannes Berg | 7174beb | 2017-06-01 16:03:19 +0200 | [diff] [blame] | 932 | 	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) | 
| Emmanuel Grumbach | 917f39b | 2015-02-10 10:49:20 +0200 | [diff] [blame] | 933 | 		return; | 
 | 934 |  | 
 | 935 | 	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { | 
 | 936 | 		/* don't collect on CMD 0 */ | 
 | 937 | 		if (!cmds_trig->cmds[i].cmd_id) | 
 | 938 | 			break; | 
 | 939 |  | 
| Sara Sharon | 0ab66e6 | 2015-07-13 14:23:59 +0300 | [diff] [blame] | 940 | 		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || | 
 | 941 | 		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id) | 
| Emmanuel Grumbach | 917f39b | 2015-02-10 10:49:20 +0200 | [diff] [blame] | 942 | 			continue; | 
 | 943 |  | 
| Johannes Berg | 7174beb | 2017-06-01 16:03:19 +0200 | [diff] [blame] | 944 | 		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, | 
 | 945 | 					"CMD 0x%02x.%02x received", | 
 | 946 | 					pkt->hdr.group_id, pkt->hdr.cmd); | 
| Emmanuel Grumbach | 917f39b | 2015-02-10 10:49:20 +0200 | [diff] [blame] | 947 | 		break; | 
 | 948 | 	} | 
 | 949 | } | 
 | 950 |  | 
/*
 * Handle a notification that isn't RX data: run debug triggers, feed
 * the notification-wait machinery, then dispatch to the matching entry
 * in iwl_mvm_rx_handlers[] - either synchronously in this context or,
 * for async handlers, by queueing it on mvm->async_handlers_list.
 * Notifications with no registered handler go to the fw runtime.
 */
static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	int i;

	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		/* synchronous handlers run right here in the RX path */
		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		/* take ownership of the RXB page for deferred handling */
		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		entry->context = rx_h->context;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		return;
	}

	iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
}
 | 997 |  | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 998 | static void iwl_mvm_rx(struct iwl_op_mode *op_mode, | 
 | 999 | 		       struct napi_struct *napi, | 
 | 1000 | 		       struct iwl_rx_cmd_buffer *rxb) | 
 | 1001 | { | 
 | 1002 | 	struct iwl_rx_packet *pkt = rxb_addr(rxb); | 
 | 1003 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1004 | 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1005 |  | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1006 | 	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1007 | 		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1008 | 	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD)) | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1009 | 		iwl_mvm_rx_rx_phy_cmd(mvm, rxb); | 
 | 1010 | 	else | 
 | 1011 | 		iwl_mvm_rx_common(mvm, rxb, pkt); | 
 | 1012 | } | 
 | 1013 |  | 
 | 1014 | static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, | 
 | 1015 | 			  struct napi_struct *napi, | 
 | 1016 | 			  struct iwl_rx_cmd_buffer *rxb) | 
 | 1017 | { | 
 | 1018 | 	struct iwl_rx_packet *pkt = rxb_addr(rxb); | 
 | 1019 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1020 | 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1021 |  | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1022 | 	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) | 
| Johannes Berg | 780e87c | 2015-09-03 14:56:10 +0200 | [diff] [blame] | 1023 | 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1024 | 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, | 
 | 1025 | 					 RX_QUEUES_NOTIFICATION))) | 
| Sara Sharon | 94bb448 | 2015-12-16 18:48:28 +0200 | [diff] [blame] | 1026 | 		iwl_mvm_rx_queue_notif(mvm, rxb, 0); | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1027 | 	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) | 
| Johannes Berg | 5803543 | 2016-04-27 13:33:26 +0200 | [diff] [blame] | 1028 | 		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); | 
| Gregory Greenman | 46d372a | 2017-11-01 09:21:24 +0200 | [diff] [blame] | 1029 | 	else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF)) | 
 | 1030 | 		iwl_mvm_tlc_update_notif(mvm, pkt); | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1031 | 	else | 
 | 1032 | 		iwl_mvm_rx_common(mvm, rxb, pkt); | 
 | 1033 | } | 
 | 1034 |  | 
| Liad Kaufman | b4f7a9d | 2016-02-03 11:05:41 +0200 | [diff] [blame] | 1035 | void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq) | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1036 | { | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1037 | 	int q; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1038 |  | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1039 | 	if (WARN_ON_ONCE(!mq)) | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1040 | 		return; | 
 | 1041 |  | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1042 | 	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { | 
 | 1043 | 		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) { | 
 | 1044 | 			IWL_DEBUG_TX_QUEUES(mvm, | 
| Liad Kaufman | b4f7a9d | 2016-02-03 11:05:41 +0200 | [diff] [blame] | 1045 | 					    "mac80211 %d already stopped\n", q); | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1046 | 			continue; | 
 | 1047 | 		} | 
 | 1048 |  | 
 | 1049 | 		ieee80211_stop_queue(mvm->hw, q); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1050 | 	} | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1051 | } | 
 | 1052 |  | 
| Emmanuel Grumbach | 156f92f | 2015-11-24 14:55:18 +0200 | [diff] [blame] | 1053 | static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, | 
 | 1054 | 			     const struct iwl_device_cmd *cmd) | 
 | 1055 | { | 
 | 1056 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
 | 1057 |  | 
 | 1058 | 	/* | 
 | 1059 | 	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA | 
 | 1060 | 	 * commands that need to block the Tx queues. | 
 | 1061 | 	 */ | 
 | 1062 | 	iwl_trans_block_txq_ptrs(mvm->trans, false); | 
 | 1063 | } | 
 | 1064 |  | 
| Liad Kaufman | b4f7a9d | 2016-02-03 11:05:41 +0200 | [diff] [blame] | 1065 | static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1066 | { | 
 | 1067 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1068 | 	unsigned long mq; | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1069 |  | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1070 | 	spin_lock_bh(&mvm->queue_info_lock); | 
| Sara Sharon | 34e1086 | 2017-02-23 13:15:07 +0200 | [diff] [blame] | 1071 | 	mq = mvm->hw_queue_to_mac80211[hw_queue]; | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1072 | 	spin_unlock_bh(&mvm->queue_info_lock); | 
 | 1073 |  | 
| Liad Kaufman | b4f7a9d | 2016-02-03 11:05:41 +0200 | [diff] [blame] | 1074 | 	iwl_mvm_stop_mac_queues(mvm, mq); | 
 | 1075 | } | 
 | 1076 |  | 
 | 1077 | void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq) | 
 | 1078 | { | 
 | 1079 | 	int q; | 
 | 1080 |  | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1081 | 	if (WARN_ON_ONCE(!mq)) | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1082 | 		return; | 
 | 1083 |  | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1084 | 	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { | 
 | 1085 | 		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) { | 
 | 1086 | 			IWL_DEBUG_TX_QUEUES(mvm, | 
| Liad Kaufman | b4f7a9d | 2016-02-03 11:05:41 +0200 | [diff] [blame] | 1087 | 					    "mac80211 %d still stopped\n", q); | 
| Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1088 | 			continue; | 
 | 1089 | 		} | 
 | 1090 |  | 
 | 1091 | 		ieee80211_wake_queue(mvm->hw, q); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1092 | 	} | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1093 | } | 
 | 1094 |  | 
| Liad Kaufman | b4f7a9d | 2016-02-03 11:05:41 +0200 | [diff] [blame] | 1095 | static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) | 
 | 1096 | { | 
 | 1097 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
 | 1098 | 	unsigned long mq; | 
 | 1099 |  | 
 | 1100 | 	spin_lock_bh(&mvm->queue_info_lock); | 
| Sara Sharon | 34e1086 | 2017-02-23 13:15:07 +0200 | [diff] [blame] | 1101 | 	mq = mvm->hw_queue_to_mac80211[hw_queue]; | 
| Liad Kaufman | b4f7a9d | 2016-02-03 11:05:41 +0200 | [diff] [blame] | 1102 | 	spin_unlock_bh(&mvm->queue_info_lock); | 
 | 1103 |  | 
 | 1104 | 	iwl_mvm_start_mac_queues(mvm, mq); | 
 | 1105 | } | 
 | 1106 |  | 
| Johannes Berg | 6ad0435 | 2017-04-25 10:21:18 +0200 | [diff] [blame] | 1107 | static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm) | 
 | 1108 | { | 
 | 1109 | 	bool state = iwl_mvm_is_radio_killed(mvm); | 
 | 1110 |  | 
 | 1111 | 	if (state) | 
 | 1112 | 		wake_up(&mvm->rx_sync_waitq); | 
 | 1113 |  | 
 | 1114 | 	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); | 
 | 1115 | } | 
 | 1116 |  | 
| Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1117 | void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) | 
 | 1118 | { | 
 | 1119 | 	if (state) | 
 | 1120 | 		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); | 
 | 1121 | 	else | 
 | 1122 | 		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); | 
 | 1123 |  | 
| Johannes Berg | 6ad0435 | 2017-04-25 10:21:18 +0200 | [diff] [blame] | 1124 | 	iwl_mvm_set_rfkill_state(mvm); | 
| Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1125 | } | 
 | 1126 |  | 
| Johannes Berg | 14cfca7 | 2014-02-25 20:50:53 +0100 | [diff] [blame] | 1127 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1128 | { | 
 | 1129 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
| Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 1130 | 	bool calibrating = READ_ONCE(mvm->calibrating); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1131 |  | 
 | 1132 | 	if (state) | 
 | 1133 | 		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); | 
 | 1134 | 	else | 
 | 1135 | 		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); | 
 | 1136 |  | 
| Johannes Berg | 6ad0435 | 2017-04-25 10:21:18 +0200 | [diff] [blame] | 1137 | 	iwl_mvm_set_rfkill_state(mvm); | 
| Johannes Berg | 14cfca7 | 2014-02-25 20:50:53 +0100 | [diff] [blame] | 1138 |  | 
| Emmanuel Grumbach | 31b8b34 | 2014-11-02 15:48:09 +0200 | [diff] [blame] | 1139 | 	/* iwl_run_init_mvm_ucode is waiting for results, abort it */ | 
 | 1140 | 	if (calibrating) | 
 | 1141 | 		iwl_abort_notification_waits(&mvm->notif_wait); | 
 | 1142 |  | 
 | 1143 | 	/* | 
 | 1144 | 	 * Stop the device if we run OPERATIONAL firmware or if we are in the | 
 | 1145 | 	 * middle of the calibrations. | 
 | 1146 | 	 */ | 
| Johannes Berg | 702e975 | 2017-06-02 11:56:58 +0200 | [diff] [blame] | 1147 | 	return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1148 | } | 
 | 1149 |  | 
 | 1150 | static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) | 
 | 1151 | { | 
 | 1152 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
 | 1153 | 	struct ieee80211_tx_info *info; | 
 | 1154 |  | 
 | 1155 | 	info = IEEE80211_SKB_CB(skb); | 
 | 1156 | 	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); | 
 | 1157 | 	ieee80211_free_txskb(mvm->hw, skb); | 
 | 1158 | } | 
 | 1159 |  | 
/*
 * struct iwl_mvm_reprobe - context for a deferred device reprobe
 * @dev: the device to reprobe (the transport's struct device)
 * @work: work item that runs iwl_mvm_reprobe_wk()
 */
struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};
 | 1164 |  | 
 | 1165 | static void iwl_mvm_reprobe_wk(struct work_struct *wk) | 
 | 1166 | { | 
 | 1167 | 	struct iwl_mvm_reprobe *reprobe; | 
 | 1168 |  | 
 | 1169 | 	reprobe = container_of(wk, struct iwl_mvm_reprobe, work); | 
 | 1170 | 	if (device_reprobe(reprobe->dev)) | 
 | 1171 | 		dev_err(reprobe->dev, "reprobe failed!\n"); | 
 | 1172 | 	kfree(reprobe); | 
 | 1173 | 	module_put(THIS_MODULE); | 
 | 1174 | } | 
 | 1175 |  | 
/*
 * Recover from a firmware error or HW restart request. Aborts pending
 * notification waits and any OS scan, then either: only collects an
 * error dump (fw_error with no fw restarts left), schedules a device
 * reprobe (error hit while already in HW restart), or asks mac80211 to
 * restart the hw (regular fw running and hw registered).
 */
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done which
	 * is not a problem. It is necessary to abort any os scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart) scan status will stay busy.
	 */
	iwl_mvm_report_scan_aborted(mvm);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (!mvm->fw_restart && fw_error) {
		/* no restarts left - just collect the assert dump */
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
					NULL);
	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		/* reprobe work frees the context and drops the module ref */
		reprobe->dev = mvm->trans->dev;
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
		   mvm->hw_registered) {
		/* don't let the transport/FW power down */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

		/* consume one of the allotted fw restarts on fw errors */
		if (fw_error && mvm->fw_restart > 0)
			mvm->fw_restart--;
		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
		ieee80211_restart_hw(mvm->hw);
	}
}
 | 1236 |  | 
| Emmanuel Grumbach | 715c998 | 2013-02-28 08:57:31 +0200 | [diff] [blame] | 1237 | static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) | 
 | 1238 | { | 
 | 1239 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
 | 1240 |  | 
 | 1241 | 	iwl_mvm_dump_nic_error_log(mvm); | 
| Emmanuel Grumbach | 1bd3cbc | 2014-03-18 21:15:06 +0200 | [diff] [blame] | 1242 |  | 
| Luciano Coelho | b08c1d9 | 2014-05-20 23:31:05 +0300 | [diff] [blame] | 1243 | 	iwl_mvm_nic_restart(mvm, true); | 
| Emmanuel Grumbach | 715c998 | 2013-02-28 08:57:31 +0200 | [diff] [blame] | 1244 | } | 
 | 1245 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1246 | static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) | 
 | 1247 | { | 
| Emmanuel Grumbach | 715c998 | 2013-02-28 08:57:31 +0200 | [diff] [blame] | 1248 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
 | 1249 |  | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1250 | 	WARN_ON(1); | 
| Luciano Coelho | b08c1d9 | 2014-05-20 23:31:05 +0300 | [diff] [blame] | 1251 | 	iwl_mvm_nic_restart(mvm, true); | 
| Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1252 | } | 
 | 1253 |  | 
/*
 * struct iwl_d0i3_iter_data - state gathered while iterating vifs on
 *	d0i3 entry
 * @mvm: the mvm context
 * @connected_vif: the associated station vif, saved during entry
 * @ap_sta_id: station id of the AP of the associated vif
 * @vif_count: number of associated station vifs seen by the iterator
 * @offloading_tid: tid selected to carry the offloading qos seq counters
 * @disable_offloading: true when no tid was safe to use for offloading
 */
struct iwl_d0i3_iter_data {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *connected_vif;
	u8 ap_sta_id;
	u8 vif_count;
	u8 offloading_tid;
	bool disable_offloading;
};
 | 1262 |  | 
/*
 * Decide whether protocol offloading must be disabled for this vif.
 * A tid is usable for offloading only if it has no pending frames and
 * no active aggregation. Returns true when no such tid exists;
 * otherwise stores the first usable tid in iter_data->offloading_tid
 * and returns false.
 * NOTE(review): uses iwl_mvm_sta_from_staid_rcu(), so this appears to
 * rely on the caller holding the RCU read lock - confirm at call sites.
 */
static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_d0i3_iter_data *iter_data)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;
	u32 available_tids = 0;
	u8 tid;

	/* only an associated station vif makes sense here */
	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
		    mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
		return false;

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
	if (!mvmsta)
		return false;

	/* hold the sta lock while scanning its per-tid state */
	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/*
		 * in case of pending tx packets, don't use this tid
		 * for offloading in order to prevent reuse of the same
		 * qos seq counters.
		 */
		if (iwl_mvm_tid_queued(mvm, tid_data))
			continue;

		if (tid_data->state != IWL_AGG_OFF)
			continue;

		available_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * disallow protocol offloading if we have no available tid
	 * (with no pending frames and no active aggregation,
	 * as we don't handle "holes" properly - the scheduler needs the
	 * frame's seq number and TFD index to match)
	 */
	if (!available_tids)
		return true;

	/* for simplicity, just use the first available tid */
	iter_data->offloading_tid = ffs(available_tids) - 1;
	return false;
}
 | 1312 |  | 
/*
 * Per-vif iterator run on d0i3 entry: for each associated station vif,
 * switch to d0i3 power settings, configure protocol offloading and
 * record the AP station id and vif pointer in the iteration data.
 */
static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_d0i3_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;

	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
	/* only associated station vifs take part in d0i3 */
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	/*
	 * in case of pending tx packets or active aggregations,
	 * avoid offloading features in order to prevent reuse of
	 * the same qos seq counters.
	 */
	if (iwl_mvm_disallow_offloading(mvm, vif, data))
		data->disable_offloading = true;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
				   false, flags);

	/*
	 * on init/association, mvm already configures POWER_TABLE_CMD
	 * and REPLY_MCAST_FILTER_CMD, so currently don't
	 * reconfigure them (we might want to use different
	 * params later on, though).
	 */
	data->ap_sta_id = mvmvif->ap_sta_id;
	data->vif_count++;

	/*
	 * no new commands can be sent at this stage, so it's safe
	 * to save the vif pointer during d0i3 entrance.
	 */
	data->connected_vif = vif;
}
 | 1353 |  | 
/*
 * Fill the wowlan config command for d0i3 entry using the data
 * collected by the vif iteration: the AP station's HT capability,
 * the chosen offloading tid, the filtering flags and the qos seq
 * counters. Does nothing when no AP station was recorded.
 */
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
				    struct iwl_wowlan_config_cmd *cmd,
				    struct iwl_d0i3_iter_data *iter_data)
{
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
		return;

	/* the station pointer is RCU-protected */
	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
	cmd->offloading_tid = iter_data->offloading_tid;
	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
		ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
	/*
	 * The d0i3 uCode takes care of the nonqos counters,
	 * so configure only the qos seq ones.
	 */
	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
	rcu_read_unlock();
}
| Eliad Peller | 6735943 | 2014-12-09 15:23:54 +0200 | [diff] [blame] | 1383 |  | 
 | 1384 | int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) | 
| Eliad Peller | b3370d4 | 2013-11-25 15:20:16 +0200 | [diff] [blame] | 1385 | { | 
 | 1386 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
| Arik Nemtsov | 98ee778 | 2013-10-02 16:58:09 +0300 | [diff] [blame] | 1387 | 	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE; | 
| Eliad Peller | b77f06d | 2013-11-06 10:49:32 +0200 | [diff] [blame] | 1388 | 	int ret; | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1389 | 	struct iwl_d0i3_iter_data d0i3_iter_data = { | 
 | 1390 | 		.mvm = mvm, | 
 | 1391 | 	}; | 
| Emmanuel Grumbach | c8b06a9 | 2014-11-24 09:06:57 +0200 | [diff] [blame] | 1392 | 	struct iwl_wowlan_config_cmd wowlan_config_cmd = { | 
 | 1393 | 		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | | 
 | 1394 | 					     IWL_WOWLAN_WAKEUP_BEACON_MISS | | 
| Sara Sharon | 0db056d | 2015-12-29 11:07:15 +0200 | [diff] [blame] | 1395 | 					     IWL_WOWLAN_WAKEUP_LINK_CHANGE), | 
| Eliad Peller | b77f06d | 2013-11-06 10:49:32 +0200 | [diff] [blame] | 1396 | 	}; | 
| Arik Nemtsov | 98ee778 | 2013-10-02 16:58:09 +0300 | [diff] [blame] | 1397 | 	struct iwl_d3_manager_config d3_cfg_cmd = { | 
 | 1398 | 		.min_sleep_time = cpu_to_le32(1000), | 
| Eliad Peller | d9f1fc2 | 2014-12-23 15:05:14 +0200 | [diff] [blame] | 1399 | 		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR), | 
| Arik Nemtsov | 98ee778 | 2013-10-02 16:58:09 +0300 | [diff] [blame] | 1400 | 	}; | 
| Eliad Peller | b3370d4 | 2013-11-25 15:20:16 +0200 | [diff] [blame] | 1401 |  | 
 | 1402 | 	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); | 
| Arik Nemtsov | 98ee778 | 2013-10-02 16:58:09 +0300 | [diff] [blame] | 1403 |  | 
| Johannes Berg | 702e975 | 2017-06-02 11:56:58 +0200 | [diff] [blame] | 1404 | 	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) | 
| Eliad Peller | 08f0d23 | 2015-12-10 15:47:11 +0200 | [diff] [blame] | 1405 | 		return -EINVAL; | 
 | 1406 |  | 
| Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 1407 | 	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); | 
| Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 1408 |  | 
| Eliad Peller | f4cf868 | 2014-11-04 16:57:06 +0200 | [diff] [blame] | 1409 | 	/* | 
 | 1410 | 	 * iwl_mvm_ref_sync takes a reference before checking the flag. | 
 | 1411 | 	 * so by checking there is no held reference we prevent a state | 
 | 1412 | 	 * in which iwl_mvm_ref_sync continues successfully while we | 
 | 1413 | 	 * configure the firmware to enter d0i3 | 
 | 1414 | 	 */ | 
 | 1415 | 	if (iwl_mvm_ref_taken(mvm)) { | 
 | 1416 | 		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n"); | 
 | 1417 | 		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); | 
| Eliad Peller | caf1578 | 2014-11-09 15:25:33 +0200 | [diff] [blame] | 1418 | 		wake_up(&mvm->d0i3_exit_waitq); | 
| Eliad Peller | f4cf868 | 2014-11-04 16:57:06 +0200 | [diff] [blame] | 1419 | 		return 1; | 
 | 1420 | 	} | 
 | 1421 |  | 
| Eliad Peller | d623097 | 2013-11-03 20:09:08 +0200 | [diff] [blame] | 1422 | 	ieee80211_iterate_active_interfaces_atomic(mvm->hw, | 
 | 1423 | 						   IEEE80211_IFACE_ITER_NORMAL, | 
 | 1424 | 						   iwl_mvm_enter_d0i3_iterator, | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1425 | 						   &d0i3_iter_data); | 
 | 1426 | 	if (d0i3_iter_data.vif_count == 1) { | 
 | 1427 | 		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id; | 
| Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 1428 | 		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading; | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1429 | 	} else { | 
 | 1430 | 		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1); | 
| Sara Sharon | 0ae9881 | 2017-01-04 14:53:58 +0200 | [diff] [blame] | 1431 | 		mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; | 
| Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 1432 | 		mvm->d0i3_offloading = false; | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1433 | 	} | 
| Eliad Peller | d623097 | 2013-11-03 20:09:08 +0200 | [diff] [blame] | 1434 |  | 
| Emmanuel Grumbach | ecc7c51 | 2015-08-17 15:54:41 +0300 | [diff] [blame] | 1435 | 	/* make sure we have no running tx while configuring the seqno */ | 
 | 1436 | 	synchronize_net(); | 
 | 1437 |  | 
| Luca Coelho | eb3908d | 2015-10-02 18:13:10 +0300 | [diff] [blame] | 1438 | 	/* Flush the hw queues, in case something got queued during entry */ | 
| Mordechai Goodstein | d167e81 | 2017-05-10 16:42:53 +0300 | [diff] [blame] | 1439 | 	/* TODO new tx api */ | 
 | 1440 | 	if (iwl_mvm_has_new_tx_api(mvm)) { | 
 | 1441 | 		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n"); | 
 | 1442 | 	} else { | 
 | 1443 | 		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), | 
 | 1444 | 					    flags); | 
 | 1445 | 		if (ret) | 
 | 1446 | 			return ret; | 
 | 1447 | 	} | 
| Luca Coelho | eb3908d | 2015-10-02 18:13:10 +0300 | [diff] [blame] | 1448 |  | 
| Eliad Peller | 183edd8 | 2015-09-01 14:16:00 +0300 | [diff] [blame] | 1449 | 	/* configure wowlan configuration only if needed */ | 
| Sara Sharon | 0ae9881 | 2017-01-04 14:53:58 +0200 | [diff] [blame] | 1450 | 	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) { | 
| Sara Sharon | 0db056d | 2015-12-29 11:07:15 +0200 | [diff] [blame] | 1451 | 		/* wake on beacons only if beacon storing isn't supported */ | 
 | 1452 | 		if (!fw_has_capa(&mvm->fw->ucode_capa, | 
 | 1453 | 				 IWL_UCODE_TLV_CAPA_BEACON_STORING)) | 
 | 1454 | 			wowlan_config_cmd.wakeup_filter |= | 
 | 1455 | 				cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING); | 
 | 1456 |  | 
| Eliad Peller | a3f7ba5 | 2015-11-11 17:23:59 +0200 | [diff] [blame] | 1457 | 		iwl_mvm_wowlan_config_key_params(mvm, | 
 | 1458 | 						 d0i3_iter_data.connected_vif, | 
 | 1459 | 						 true, flags); | 
 | 1460 |  | 
| Eliad Peller | 183edd8 | 2015-09-01 14:16:00 +0300 | [diff] [blame] | 1461 | 		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, | 
 | 1462 | 					&d0i3_iter_data); | 
 | 1463 |  | 
 | 1464 | 		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags, | 
 | 1465 | 					   sizeof(wowlan_config_cmd), | 
 | 1466 | 					   &wowlan_config_cmd); | 
 | 1467 | 		if (ret) | 
 | 1468 | 			return ret; | 
 | 1469 | 	} | 
| Eliad Peller | b77f06d | 2013-11-06 10:49:32 +0200 | [diff] [blame] | 1470 |  | 
| Arik Nemtsov | 98ee778 | 2013-10-02 16:58:09 +0300 | [diff] [blame] | 1471 | 	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, | 
 | 1472 | 				    flags | CMD_MAKE_TRANS_IDLE, | 
 | 1473 | 				    sizeof(d3_cfg_cmd), &d3_cfg_cmd); | 
| Eliad Peller | b3370d4 | 2013-11-25 15:20:16 +0200 | [diff] [blame] | 1474 | } | 
 | 1475 |  | 
| Eliad Peller | d623097 | 2013-11-03 20:09:08 +0200 | [diff] [blame] | 1476 | static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac, | 
 | 1477 | 				       struct ieee80211_vif *vif) | 
 | 1478 | { | 
 | 1479 | 	struct iwl_mvm *mvm = _data; | 
 | 1480 | 	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO; | 
 | 1481 |  | 
 | 1482 | 	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr); | 
 | 1483 | 	if (vif->type != NL80211_IFTYPE_STATION || | 
 | 1484 | 	    !vif->bss_conf.assoc) | 
 | 1485 | 		return; | 
 | 1486 |  | 
 | 1487 | 	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags); | 
 | 1488 | } | 
 | 1489 |  | 
/* context passed to iwl_mvm_d0i3_exit_work_iter() for each active vif */
struct iwl_mvm_d0i3_exit_work_iter_data {
	struct iwl_mvm *mvm;
	/* WOWLAN_GET_STATUSES response; points into the host command's
	 * resp_pkt, so it is only valid until the response is freed
	 */
	struct iwl_wowlan_status *status;
	/* wakeup_reasons from the status, already converted to CPU order */
	u32 wakeup_reasons;
};
 | 1495 |  | 
| Eliad Peller | a3f7ba5 | 2015-11-11 17:23:59 +0200 | [diff] [blame] | 1496 | static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac, | 
 | 1497 | 					struct ieee80211_vif *vif) | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1498 | { | 
| Eliad Peller | a3f7ba5 | 2015-11-11 17:23:59 +0200 | [diff] [blame] | 1499 | 	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data; | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1500 | 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | 
| Eliad Peller | a3f7ba5 | 2015-11-11 17:23:59 +0200 | [diff] [blame] | 1501 | 	u32 reasons = data->wakeup_reasons; | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1502 |  | 
| Eliad Peller | a3f7ba5 | 2015-11-11 17:23:59 +0200 | [diff] [blame] | 1503 | 	/* consider only the relevant station interface */ | 
 | 1504 | 	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || | 
 | 1505 | 	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id) | 
 | 1506 | 		return; | 
 | 1507 |  | 
 | 1508 | 	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) | 
 | 1509 | 		iwl_mvm_connection_loss(data->mvm, vif, "D0i3"); | 
 | 1510 | 	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON) | 
 | 1511 | 		ieee80211_beacon_loss(vif); | 
 | 1512 | 	else | 
 | 1513 | 		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status); | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1514 | } | 
 | 1515 |  | 
/*
 * Re-enable TX after d0i3 exit: if offloading was active, sync the
 * per-TID QoS sequence numbers reported by the firmware into the AP
 * station, then re-enqueue (or drop) every frame that was parked on
 * mvm->d0i3_tx while in d0i3, and finally clear the IN_D0I3 state.
 *
 * @qos_seq: per-TID sequence counters from the FW status, or NULL when
 *	unavailable; may point into a response packet owned by the caller.
 */
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

	/* no AP station recorded - nothing to sync, just flush the queue */
	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	/* station may have been removed meanwhile; drop queued frames then */
	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/* firmware stores last-used one, we store next one */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		/* without a valid sta (or on TX failure) free the frame */
		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	/* leave d0i3 state and wake anyone waiting for the transition */
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}
 | 1571 |  | 
/*
 * Deferred d0i3 exit handler: queries the WoWLAN status from the FW,
 * lets the relevant vif react to the wakeup reason (connection loss,
 * beacon loss or key update), re-enables TX, and releases the
 * IWL_MVM_REF_EXIT_WORK reference taken when the exit was triggered.
 */
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
	struct iwl_host_cmd get_status_cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
	};
	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
		.mvm = mvm,
	};

	struct iwl_wowlan_status *status;
	int ret;
	u32 wakeup_reasons = 0;
	__le16 *qos_seq = NULL;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
	if (ret)
		goto out;

	/* status (and hence qos_seq) point into resp_pkt's payload */
	status = (void *)get_status_cmd.resp_pkt->data;
	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
	qos_seq = status->qos_seq_ctr;

	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);

	iter_data.wakeup_reasons = wakeup_reasons;
	iter_data.status = status;
	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_d0i3_exit_work_iter,
					    &iter_data);
out:
	/* re-enqueue parked TX even if the status query failed
	 * (qos_seq stays NULL in that case)
	 */
	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);

	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);

	/* qos_seq might point inside resp_pkt, so free it only now */
	if (get_status_cmd.resp_pkt)
		iwl_free_resp(&get_status_cmd);

	/* the FW might have updated the regdomain */
	iwl_mvm_update_changed_regdom(mvm);

	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
	mutex_unlock(&mvm->mutex);
}
 | 1621 |  | 
| Eliad Peller | d15a747 | 2014-03-27 18:53:12 +0200 | [diff] [blame] | 1622 | int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm) | 
| Eliad Peller | b3370d4 | 2013-11-25 15:20:16 +0200 | [diff] [blame] | 1623 | { | 
| Arik Nemtsov | 98ee778 | 2013-10-02 16:58:09 +0300 | [diff] [blame] | 1624 | 	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | | 
 | 1625 | 		    CMD_WAKE_UP_TRANS; | 
| Eliad Peller | d623097 | 2013-11-03 20:09:08 +0200 | [diff] [blame] | 1626 | 	int ret; | 
| Eliad Peller | b3370d4 | 2013-11-25 15:20:16 +0200 | [diff] [blame] | 1627 |  | 
 | 1628 | 	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); | 
| Arik Nemtsov | 98ee778 | 2013-10-02 16:58:09 +0300 | [diff] [blame] | 1629 |  | 
| Johannes Berg | 702e975 | 2017-06-02 11:56:58 +0200 | [diff] [blame] | 1630 | 	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) | 
| Eliad Peller | 08f0d23 | 2015-12-10 15:47:11 +0200 | [diff] [blame] | 1631 | 		return -EINVAL; | 
 | 1632 |  | 
| Eliad Peller | d15a747 | 2014-03-27 18:53:12 +0200 | [diff] [blame] | 1633 | 	mutex_lock(&mvm->d0i3_suspend_mutex); | 
 | 1634 | 	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) { | 
 | 1635 | 		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n"); | 
 | 1636 | 		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags); | 
 | 1637 | 		mutex_unlock(&mvm->d0i3_suspend_mutex); | 
 | 1638 | 		return 0; | 
 | 1639 | 	} | 
 | 1640 | 	mutex_unlock(&mvm->d0i3_suspend_mutex); | 
 | 1641 |  | 
| Eliad Peller | d623097 | 2013-11-03 20:09:08 +0200 | [diff] [blame] | 1642 | 	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); | 
 | 1643 | 	if (ret) | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1644 | 		goto out; | 
| Eliad Peller | d623097 | 2013-11-03 20:09:08 +0200 | [diff] [blame] | 1645 |  | 
 | 1646 | 	ieee80211_iterate_active_interfaces_atomic(mvm->hw, | 
 | 1647 | 						   IEEE80211_IFACE_ITER_NORMAL, | 
 | 1648 | 						   iwl_mvm_exit_d0i3_iterator, | 
 | 1649 | 						   mvm); | 
| Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 1650 | out: | 
 | 1651 | 	schedule_work(&mvm->d0i3_exit_work); | 
 | 1652 | 	return ret; | 
| Eliad Peller | b3370d4 | 2013-11-25 15:20:16 +0200 | [diff] [blame] | 1653 | } | 
 | 1654 |  | 
| Eliad Peller | 6735943 | 2014-12-09 15:23:54 +0200 | [diff] [blame] | 1655 | int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) | 
| Eliad Peller | d15a747 | 2014-03-27 18:53:12 +0200 | [diff] [blame] | 1656 | { | 
 | 1657 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
 | 1658 |  | 
 | 1659 | 	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK); | 
 | 1660 | 	return _iwl_mvm_exit_d0i3(mvm); | 
 | 1661 | } | 
 | 1662 |  | 
/*
 * Op mode ops shared by both ops structs below; only the RX handlers
 * differ between the single-queue and multi-queue variants.
 */
#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
	.async_cb = iwl_mvm_async_cb,				\
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop
 | 1678 |  | 
/* op mode ops for the single-queue RX path */
static const struct iwl_op_mode_ops iwl_mvm_ops = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};
 | 1683 |  | 
 | 1684 | static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, | 
 | 1685 | 			      struct napi_struct *napi, | 
 | 1686 | 			      struct iwl_rx_cmd_buffer *rxb, | 
 | 1687 | 			      unsigned int queue) | 
 | 1688 | { | 
 | 1689 | 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 
| Sara Sharon | 585a6fc | 2015-12-01 13:48:18 +0200 | [diff] [blame] | 1690 | 	struct iwl_rx_packet *pkt = rxb_addr(rxb); | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1691 | 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1692 |  | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1693 | 	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) | 
| Sara Sharon | a338384 | 2016-02-28 15:41:47 +0200 | [diff] [blame] | 1694 | 		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1695 | 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, | 
 | 1696 | 					 RX_QUEUES_NOTIFICATION))) | 
| Sara Sharon | 94bb448 | 2015-12-16 18:48:28 +0200 | [diff] [blame] | 1697 | 		iwl_mvm_rx_queue_notif(mvm, rxb, queue); | 
| Johannes Berg | 61b0f5d | 2016-08-04 08:57:59 +0200 | [diff] [blame] | 1698 | 	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) | 
| Sara Sharon | 585a6fc | 2015-12-01 13:48:18 +0200 | [diff] [blame] | 1699 | 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); | 
| Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1700 | } | 
 | 1701 |  | 
/* op mode ops for the multi-queue (RSS-capable) RX path */
static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
};