Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1 | /****************************************************************************** |
| 2 | * |
| 3 | * This file is provided under a dual BSD/GPLv2 license. When using or |
| 4 | * redistributing this file, you may do so under either license. |
| 5 | * |
| 6 | * GPL LICENSE SUMMARY |
| 7 | * |
Emmanuel Grumbach | 51368bf | 2013-12-30 13:15:54 +0200 | [diff] [blame] | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
Luciano Coelho | 9af91f4 | 2015-02-10 10:42:26 +0200 | [diff] [blame] | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
Chaya Rachel Ivgi | 0a3b711 | 2015-12-16 16:34:55 +0200 | [diff] [blame] | 10 | * Copyright(c) 2016 Intel Deutschland GmbH |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 11 | * |
| 12 | * This program is free software; you can redistribute it and/or modify |
| 13 | * it under the terms of version 2 of the GNU General Public License as |
| 14 | * published by the Free Software Foundation. |
| 15 | * |
| 16 | * This program is distributed in the hope that it will be useful, but |
| 17 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 19 | * General Public License for more details. |
| 20 | * |
| 21 | * You should have received a copy of the GNU General Public License |
| 22 | * along with this program; if not, write to the Free Software |
| 23 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, |
| 24 | * USA |
| 25 | * |
| 26 | * The full GNU General Public License is included in this distribution |
Emmanuel Grumbach | 410dc5a | 2013-02-18 09:22:28 +0200 | [diff] [blame] | 27 | * in the file called COPYING. |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 28 | * |
| 29 | * Contact Information: |
Emmanuel Grumbach | cb2f827 | 2015-11-17 15:39:56 +0200 | [diff] [blame] | 30 | * Intel Linux Wireless <linuxwifi@intel.com> |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 31 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
| 32 | * |
| 33 | * BSD LICENSE |
| 34 | * |
Emmanuel Grumbach | 51368bf | 2013-12-30 13:15:54 +0200 | [diff] [blame] | 35 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
Luciano Coelho | 9af91f4 | 2015-02-10 10:42:26 +0200 | [diff] [blame] | 36 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
Chaya Rachel Ivgi | 0a3b711 | 2015-12-16 16:34:55 +0200 | [diff] [blame] | 37 | * Copyright(c) 2016 Intel Deutschland GmbH |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 38 | * All rights reserved. |
| 39 | * |
| 40 | * Redistribution and use in source and binary forms, with or without |
| 41 | * modification, are permitted provided that the following conditions |
| 42 | * are met: |
| 43 | * |
| 44 | * * Redistributions of source code must retain the above copyright |
| 45 | * notice, this list of conditions and the following disclaimer. |
| 46 | * * Redistributions in binary form must reproduce the above copyright |
| 47 | * notice, this list of conditions and the following disclaimer in |
| 48 | * the documentation and/or other materials provided with the |
| 49 | * distribution. |
| 50 | * * Neither the name Intel Corporation nor the names of its |
| 51 | * contributors may be used to endorse or promote products derived |
| 52 | * from this software without specific prior written permission. |
| 53 | * |
| 54 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 55 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 56 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 57 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 58 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 59 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 60 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 61 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 62 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 63 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 64 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 65 | * |
| 66 | *****************************************************************************/ |
| 67 | |
| 68 | #ifndef __IWL_MVM_H__ |
| 69 | #define __IWL_MVM_H__ |
| 70 | |
| 71 | #include <linux/list.h> |
| 72 | #include <linux/spinlock.h> |
| 73 | #include <linux/leds.h> |
| 74 | #include <linux/in6.h> |
| 75 | |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 76 | #ifdef CONFIG_THERMAL |
| 77 | #include <linux/thermal.h> |
| 78 | #endif |
| 79 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 80 | #include "iwl-op-mode.h" |
| 81 | #include "iwl-trans.h" |
| 82 | #include "iwl-notif-wait.h" |
| 83 | #include "iwl-eeprom-parse.h" |
Emmanuel Grumbach | d2709ad | 2015-01-29 14:58:06 +0200 | [diff] [blame] | 84 | #include "iwl-fw-file.h" |
Chaya Rachel Ivgi | 3444682 | 2015-04-19 12:26:39 +0300 | [diff] [blame] | 85 | #include "iwl-config.h" |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 86 | #include "sta.h" |
| 87 | #include "fw-api.h" |
Johannes Berg | 9954592 | 2013-06-14 13:36:21 +0200 | [diff] [blame] | 88 | #include "constants.h" |
Gregory Greenman | ce79291 | 2015-06-02 18:06:16 +0300 | [diff] [blame] | 89 | #include "tof.h" |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 90 | |
#define IWL_MVM_MAX_ADDRESSES		5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
/* A TimeUnit is 1024 microseconds */
/*
 * Argument is parenthesized so that expression arguments such as
 * MSEC_TO_TU(a + b) expand correctly; the unparenthesized form would
 * expand to (a + b*1000/1024) due to operator precedence.
 */
#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)

/* For GO, this value represents the number of TUs before CSA "beacon
 * 0" TBTT when the CSA time-event needs to be scheduled to start. It
 * must be big enough to ensure that we switch in time.
 */
#define IWL_MVM_CHANNEL_SWITCH_TIME_GO		40

/* For client, this value represents the number of TUs before CSA
 * "beacon 1" TBTT, instead. This is because we don't know when the
 * GO/AP will be in the new channel, so we switch early enough.
 */
#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT	10

/*
 * This value (in TUs) is used to fine tune the CSA NoA end time which should
 * be just before "beacon 0" TBTT.
 */
#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4

/*
 * Number of beacons to transmit on a new channel until we unblock tx to
 * the stations, even if we didn't identify them on a new channel
 */
#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
/* mac80211 operations table implemented by this op-mode */
extern const struct ieee80211_ops iwl_mvm_hw_ops;
/**
 * struct iwl_mvm_mod_params - module parameters for iwlmvm
 * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
 *	We will register to mac80211 to have testmode working. The NIC must not
 *	be up'ed after the INIT fw asserted. This is useful to be able to use
 *	proprietary tools over testmode to debug the INIT fw.
 * @tfd_q_hang_detect: enables the detection of hung transmit queues
 * @power_scheme: one of enum iwl_power_scheme
 */
struct iwl_mvm_mod_params {
	bool init_dbg;
	bool tfd_q_hang_detect;
	int power_scheme;
};
/* single module-wide instance holding the current module parameter values */
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
/**
 * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump
 *
 * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
 *	transport's data.
 * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode
 * @op_mode_len: length of the valid data in op_mode_ptr
 *
 * NOTE(review): an earlier version of this kernel-doc documented a
 * @trans_len member, but no such field exists in the struct; the stale
 * entry has been removed.
 */
struct iwl_mvm_dump_ptrs {
	struct iwl_trans_dump_data *trans_ptr;
	void *op_mode_ptr;
	u32 op_mode_len;
};
/**
 * struct iwl_mvm_dump_desc - describes the dump
 * @len: length of trig_desc->data
 * @trig_desc: the description of the dump
 */
struct iwl_mvm_dump_desc {
	size_t len;
	/* must be last: trig_desc carries trailing variable-length data */
	struct iwl_fw_error_dump_trigger_desc trig_desc;
};

/* pre-defined dump descriptor used for firmware-assert dumps */
extern const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert;
/**
 * struct iwl_mvm_phy_ctxt - driver-side state of a PHY context
 * @id: context id
 * @color: context color
 * @ref: reference count (number of users of this context)
 * @channel: the channel this context is on (see TODO below)
 */
struct iwl_mvm_phy_ctxt {
	u16 id;
	u16 color;
	u32 ref;

	/*
	 * TODO: This should probably be removed. Currently here only for rate
	 * scaling algorithm
	 */
	struct ieee80211_channel *channel;
};
| 179 | |
/**
 * struct iwl_mvm_time_event_data - driver state for one time event
 * @vif: the interface the time event was scheduled for
 * @list: entry in the time event list; list membership is indicated by
 *	the @id field (see below)
 * @end_jiffies: jiffies value at which the event ends
 * @duration: duration of the event
 * @running: true while the time event is active
 * @uid: unique id of the time event
 * @id: event id; see locking comment in the body
 */
struct iwl_mvm_time_event_data {
	struct ieee80211_vif *vif;
	struct list_head list;
	unsigned long end_jiffies;
	u32 duration;
	bool running;
	u32 uid;

	/*
	 * The access to the 'id' field must be done when the
	 * mvm->time_event_lock is held, as its value is used to indicate
	 * if the te is in the time event list or not (when id == TE_MAX)
	 */
	u32 id;
};
| 195 | |
/* Power management */

/**
 * enum iwl_power_scheme
 * @IWL_POWER_SCHEME_CAM: Continuously Active Mode
 * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
 * @IWL_POWER_SCHEME_LP: Low Power
 *
 * (kernel-doc previously used the wrong IWL_POWER_LEVEL_* names.)
 */
enum iwl_power_scheme {
	IWL_POWER_SCHEME_CAM = 1,
	IWL_POWER_SCHEME_BPS,
	IWL_POWER_SCHEME_LP
};

/* maximum listen interval advertised to mac80211 */
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
/* maximum U-APSD service period length */
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 212 | |
#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * enum iwl_dbgfs_pm_mask - which power-management values were overridden
 * via debugfs; each bit corresponds to a field of struct iwl_dbgfs_pm.
 */
enum iwl_dbgfs_pm_mask {
	MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
	MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
	MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
	MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
	MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
	/* NOTE(review): BIT(5) is unused -- presumably a removed option;
	 * confirm against the debugfs parsing code before reusing it */
	MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
	MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
	MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
	MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
	MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10),
};

/*
 * struct iwl_dbgfs_pm - power-management overrides set from debugfs.
 * @mask is an OR of enum iwl_dbgfs_pm_mask bits telling which of the
 * other fields hold a valid override value.
 */
struct iwl_dbgfs_pm {
	u16 keep_alive_seconds;
	u32 rx_data_timeout;
	u32 tx_data_timeout;
	bool skip_over_dtim;
	u8 skip_dtim_periods;
	bool lprx_ena;
	u32 lprx_rssi_threshold;
	bool snooze_ena;
	bool uapsd_misbehaving;
	bool use_ps_poll;
	int mask;
};

/* beacon filtering */

/*
 * enum iwl_dbgfs_bf_mask - which beacon-filtering/abort values were
 * overridden via debugfs; each bit corresponds to a field of
 * struct iwl_dbgfs_bf.
 */
enum iwl_dbgfs_bf_mask {
	MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
	MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
	MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
	MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
	MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
	MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
	MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
	MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
	MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
	MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
	MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
};

/*
 * struct iwl_dbgfs_bf - beacon filtering/abort overrides set from debugfs.
 * @mask is an OR of enum iwl_dbgfs_bf_mask bits telling which of the
 * other fields hold a valid override value.
 */
struct iwl_dbgfs_bf {
	u32 bf_energy_delta;
	u32 bf_roaming_energy_delta;
	u32 bf_roaming_state;
	u32 bf_temp_threshold;
	u32 bf_temp_fast_filter;
	u32 bf_temp_slow_filter;
	u32 bf_enable_beacon_filter;
	u32 bf_debug_flag;
	u32 bf_escape_timer;
	u32 ba_escape_timer;
	u32 ba_enable_beacon_abort;
	int mask;
};
#endif
| 272 | |
/*
 * enum iwl_mvm_smps_type_request - sources of SMPS requests; each source
 * gets its own slot (see smps_requests[] in struct iwl_mvm_vif) and the
 * requests are combined into one overall request.
 */
enum iwl_mvm_smps_type_request {
	IWL_MVM_SMPS_REQ_BT_COEX,	/* BT coexistence */
	IWL_MVM_SMPS_REQ_TT,		/* thermal throttling */
	IWL_MVM_SMPS_REQ_PROT,		/* protection */
	NUM_IWL_MVM_SMPS_REQ,		/* number of sources, keep last */
};
/*
 * enum iwl_mvm_ref_type - reasons for holding a device-usage reference.
 * Each value names a code path that may keep the device in use.
 */
enum iwl_mvm_ref_type {
	IWL_MVM_REF_UCODE_DOWN,
	IWL_MVM_REF_SCAN,
	IWL_MVM_REF_ROC,
	IWL_MVM_REF_ROC_AUX,
	IWL_MVM_REF_P2P_CLIENT,
	IWL_MVM_REF_AP_IBSS,
	IWL_MVM_REF_USER,
	IWL_MVM_REF_TX,
	IWL_MVM_REF_TX_AGG,
	IWL_MVM_REF_ADD_IF,
	IWL_MVM_REF_START_AP,
	IWL_MVM_REF_BSS_CHANGED,
	IWL_MVM_REF_PREPARE_TX,
	IWL_MVM_REF_PROTECT_TDLS,
	IWL_MVM_REF_CHECK_CTKILL,
	IWL_MVM_REF_PRPH_READ,
	IWL_MVM_REF_PRPH_WRITE,
	IWL_MVM_REF_NMI,
	IWL_MVM_REF_TM_CMD,
	IWL_MVM_REF_EXIT_WORK,
	IWL_MVM_REF_PROTECT_CSA,
	IWL_MVM_REF_FW_DBG_COLLECT,
	IWL_MVM_REF_INIT_UCODE,
	IWL_MVM_REF_SENDING_CMD,
	IWL_MVM_REF_RX,

	/* update debugfs.c when changing this */

	IWL_MVM_REF_COUNT,	/* number of reference types, keep last */
};
/* forced antenna modes for BT coexistence */
enum iwl_bt_force_ant_mode {
	BT_FORCE_ANT_DIS = 0,	/* forcing disabled */
	BT_FORCE_ANT_AUTO,	/* automatic */
	BT_FORCE_ANT_BT,	/* force antenna to BT */
	BT_FORCE_ANT_WIFI,	/* force antenna to WiFi */

	BT_FORCE_ANT_MAX,	/* number of modes, keep last */
};
/**
 * struct iwl_mvm_vif_bf_data - beacon filtering related data
 * @bf_enabled: indicates if beacon filtering is enabled
 * @ba_enabled: indicates if beacon abort is enabled
 * @ave_beacon_signal: average beacon signal
 * @last_cqm_event: rssi of the last cqm event
 * @bt_coex_min_thold: minimum threshold for BT coex
 * @bt_coex_max_thold: maximum threshold for BT coex
 * @last_bt_coex_event: rssi of the last BT coex event
 */
struct iwl_mvm_vif_bf_data {
	bool bf_enabled;
	bool ba_enabled;
	int ave_beacon_signal;
	int last_cqm_event;
	int bt_coex_min_thold;
	int bt_coex_max_thold;
	int last_bt_coex_event;
};
/**
 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
 * @mvm: pointer back to the owning mvm context
 * @id: between 0 and 3
 * @color: to solve races upon MAC addition and removal
 * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
 * @bssid: BSSID for this (client) interface
 * @associated: indicates that we're currently associated, used only for
 *	managing the firmware state in iwl_mvm_bss_info_changed_station()
 * @ap_assoc_sta_count: count of stations associated to us - valid only
 *	if VIF type is AP
 * @uploaded: indicates the MAC context has been added to the device
 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
 *	should get quota etc.
 * @pm_enabled: indicates if MAC power management is allowed
 * @monitor_active: indicates that monitor context is configured, and that the
 *	interface should get quota etc.
 * @low_latency_traffic: indicates low latency traffic was detected
 * @low_latency_dbgfs: low latency mode set from debugfs
 * @low_latency_vcmd: low latency mode set from vendor command
 * @ps_disabled: indicates that this interface requires PS to be disabled
 * @bf_data: beacon filtering state, see &struct iwl_mvm_vif_bf_data
 * @queue_params: QoS params for this MAC
 * @bcast_sta: station used for broadcast packets. Used by the following
 *	vifs: P2P_DEVICE, GO and AP.
 * @beacon_skb: the skb used to hold the AP/GO beacon template
 * @smps_requests: the SMPS requests of different parts of the driver,
 *	combined on update to yield the overall request to mac80211.
 * @beacon_stats: beacon statistics, containing the # of received beacons,
 *	# of received beacons accumulated over FW restart, and the current
 *	average signal of beacons retrieved from the firmware
 * @csa_failed: CSA failed to schedule time event, report an error later
 * @features: hw features active for this vif
 */
struct iwl_mvm_vif {
	struct iwl_mvm *mvm;
	u16 id;
	u16 color;
	u8 ap_sta_id;

	u8 bssid[ETH_ALEN];
	bool associated;
	u8 ap_assoc_sta_count;

	bool uploaded;
	bool ap_ibss_active;
	bool pm_enabled;
	bool monitor_active;
	bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd;
	bool ps_disabled;
	struct iwl_mvm_vif_bf_data bf_data;

	struct {
		u32 num_beacons, accu_num_beacons;
		u8 avg_signal;
	} beacon_stats;

	u32 ap_beacon_time;

	enum iwl_tsf_id tsf_id;

	/*
	 * QoS data from mac80211, need to store this here
	 * as mac80211 has a separate callback but we need
	 * to have the data for the MAC context
	 */
	struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
	struct iwl_mvm_time_event_data time_event_data;
	/* NOTE(review): "hs" presumably stands for hotspot -- confirm */
	struct iwl_mvm_time_event_data hs_time_event_data;

	struct iwl_mvm_int_sta bcast_sta;

	/*
	 * Assigned while mac80211 has the interface in a channel context,
	 * or, for P2P Device, while it exists.
	 */
	struct iwl_mvm_phy_ctxt *phy_ctxt;

#ifdef CONFIG_PM
	/* WoWLAN GTK rekey data */
	struct {
		u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
		__le64 replay_ctr;
		bool valid;
	} rekey_data;

	int tx_key_idx;

	bool seqno_valid;
	u16 seqno;
#endif

#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 addresses for WoWLAN */
	struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
	unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
	int num_target_ipv6_addrs;
#endif

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_slink;
	struct iwl_dbgfs_pm dbgfs_pm;
	struct iwl_dbgfs_bf dbgfs_bf;
	struct iwl_mac_power_cmd mac_pwr_cmd;
	int dbgfs_quota_min;
#endif

	enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];

	/* FW identified misbehaving AP */
	u8 uapsd_misbehaving_bssid[ETH_ALEN];

	/* Indicates that CSA countdown may be started */
	bool csa_countdown;
	bool csa_failed;
	u16 csa_target_freq;

	/* TCP Checksum Offload */
	netdev_features_t features;

	/*
	 * link quality measurement - used to check whether this interface
	 * is in the middle of a link quality measurement
	 */
	bool lqm_active;
};
| 466 | |
| 467 | static inline struct iwl_mvm_vif * |
| 468 | iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) |
| 469 | { |
Sharon Dvir | bdc98b1 | 2016-07-25 16:11:05 +0300 | [diff] [blame] | 470 | if (!vif) |
| 471 | return NULL; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 472 | return (void *)vif->drv_priv; |
| 473 | } |
| 474 | |
/* TID to mac80211 access-category mapping table */
extern const u8 tid_to_mac80211_ac[];

/* shift turning a "running" scan-status bit into its "stopping" bit */
#define IWL_MVM_SCAN_STOPPING_SHIFT	8
/*
 * enum iwl_scan_status - bitmap of running/stopping scan types.
 *
 * The low byte (IWL_MVM_SCAN_MASK) holds the "running" bits; the next
 * byte holds the matching "stopping" bits, i.e. each stopping bit is the
 * running bit shifted left by IWL_MVM_SCAN_STOPPING_SHIFT. The *_MASK
 * values combine a running bit with its stopping counterpart.
 */
enum iwl_scan_status {
	IWL_MVM_SCAN_REGULAR		= BIT(0),
	IWL_MVM_SCAN_SCHED		= BIT(1),
	IWL_MVM_SCAN_NETDETECT		= BIT(2),

	IWL_MVM_SCAN_STOPPING_REGULAR	= BIT(8),
	IWL_MVM_SCAN_STOPPING_SCHED	= BIT(9),
	IWL_MVM_SCAN_STOPPING_NETDETECT	= BIT(10),

	IWL_MVM_SCAN_REGULAR_MASK	= IWL_MVM_SCAN_REGULAR |
					  IWL_MVM_SCAN_STOPPING_REGULAR,
	IWL_MVM_SCAN_SCHED_MASK		= IWL_MVM_SCAN_SCHED |
					  IWL_MVM_SCAN_STOPPING_SCHED,
	IWL_MVM_SCAN_NETDETECT_MASK	= IWL_MVM_SCAN_NETDETECT |
					  IWL_MVM_SCAN_STOPPING_NETDETECT,

	IWL_MVM_SCAN_STOPPING_MASK	= 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
	IWL_MVM_SCAN_MASK		= 0xff,
};
| 498 | |
/*
 * enum iwl_mvm_scan_type - scan parameter profile to use; selection
 * logic lives in the scan code.
 */
enum iwl_mvm_scan_type {
	IWL_SCAN_TYPE_NOT_SET,		/* no type chosen yet */
	IWL_SCAN_TYPE_UNASSOC,
	IWL_SCAN_TYPE_WILD,
	IWL_SCAN_TYPE_MILD,
	IWL_SCAN_TYPE_FRAGMENTED,
};
| 506 | |
/* state of "pass all results" handling for scheduled scan */
enum iwl_mvm_sched_scan_pass_all_states {
	SCHED_SCAN_PASS_ALL_DISABLED,
	SCHED_SCAN_PASS_ALL_ENABLED,
	SCHED_SCAN_PASS_ALL_FOUND,
};
| 512 | |
/**
 * struct iwl_nvm_section - describes an NVM section in memory.
 * @length: length of the valid data
 * @data: pointer to the section data
 *
 * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
 * and saved for later use by the driver. Not all NVM sections are saved
 * this way, only the needed ones.
 */
struct iwl_nvm_section {
	u16 length;
	const u8 *data;
};
| 524 | |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 525 | /** |
| 526 | * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure |
| 527 | * @ct_kill_exit: worker to exit thermal kill |
| 528 | * @dynamic_smps: Is thermal throttling enabled dynamic_smps? |
| 529 | * @tx_backoff: The current thremal throttling tx backoff in uSec. |
Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 530 | * @min_backoff: The minimal tx backoff due to power restrictions |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 531 | * @params: Parameters to configure the thermal throttling algorithm. |
eytan lifshitz | dafe6c4 | 2013-06-18 14:28:56 +0300 | [diff] [blame] | 532 | * @throttle: Is thermal throttling is active? |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 533 | */ |
| 534 | struct iwl_mvm_tt_mgmt { |
| 535 | struct delayed_work ct_kill_exit; |
| 536 | bool dynamic_smps; |
| 537 | u32 tx_backoff; |
Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 538 | u32 min_backoff; |
Chaya Rachel Ivgi | 3444682 | 2015-04-19 12:26:39 +0300 | [diff] [blame] | 539 | struct iwl_tt_params params; |
eytan lifshitz | dafe6c4 | 2013-06-18 14:28:56 +0300 | [diff] [blame] | 540 | bool throttle; |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 541 | }; |
| 542 | |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 543 | #ifdef CONFIG_THERMAL |
| 544 | /** |
| 545 | *struct iwl_mvm_thermal_device - thermal zone related data |
| 546 | * @temp_trips: temperature thresholds for report |
| 547 | * @fw_trips_index: keep indexes to original array - temp_trips |
| 548 | * @tzone: thermal zone device data |
| 549 | */ |
| 550 | struct iwl_mvm_thermal_device { |
| 551 | s16 temp_trips[IWL_MAX_DTS_TRIPS]; |
| 552 | u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; |
| 553 | struct thermal_zone_device *tzone; |
| 554 | }; |
Chaya Rachel Ivgi | 5c89e7b | 2016-01-05 10:34:47 +0200 | [diff] [blame] | 555 | |
| 556 | /* |
Chaya Rachel Ivgi | b358993 | 2016-02-14 14:03:10 +0200 | [diff] [blame] | 557 | * struct iwl_mvm_cooling_device |
| 558 | * @cur_state: current state |
Chaya Rachel Ivgi | 5c89e7b | 2016-01-05 10:34:47 +0200 | [diff] [blame] | 559 | * @cdev: struct thermal cooling device |
| 560 | */ |
| 561 | struct iwl_mvm_cooling_device { |
| 562 | u32 cur_state; |
| 563 | struct thermal_cooling_device *cdev; |
| 564 | }; |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 565 | #endif |
| 566 | |
Eyal Shapira | 5fc0f76 | 2014-01-28 01:35:32 +0200 | [diff] [blame] | 567 | #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 |
| 568 | |
| 569 | struct iwl_mvm_frame_stats { |
| 570 | u32 legacy_frames; |
| 571 | u32 ht_frames; |
| 572 | u32 vht_frames; |
| 573 | u32 bw_20_frames; |
| 574 | u32 bw_40_frames; |
| 575 | u32 bw_80_frames; |
| 576 | u32 bw_160_frames; |
| 577 | u32 sgi_frames; |
| 578 | u32 ngi_frames; |
| 579 | u32 siso_frames; |
| 580 | u32 mimo2_frames; |
| 581 | u32 agg_frames; |
| 582 | u32 ampdu_count; |
| 583 | u32 success_frames; |
| 584 | u32 fail_frames; |
| 585 | u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES]; |
| 586 | int last_frame_idx; |
| 587 | }; |
| 588 | |
Eliad Peller | d15a747 | 2014-03-27 18:53:12 +0200 | [diff] [blame] | 589 | enum { |
| 590 | D0I3_DEFER_WAKEUP, |
| 591 | D0I3_PENDING_WAKEUP, |
| 592 | }; |
| 593 | |
Matti Gottlieb | 7280d1f | 2014-07-17 16:41:14 +0300 | [diff] [blame] | 594 | #define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff |
| 595 | #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 |
| 596 | #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 |
| 597 | |
Arik Nemtsov | 1d3c3f6 | 2014-10-23 18:03:10 +0300 | [diff] [blame] | 598 | enum iwl_mvm_tdls_cs_state { |
| 599 | IWL_MVM_TDLS_SW_IDLE = 0, |
| 600 | IWL_MVM_TDLS_SW_REQ_SENT, |
Arik Nemtsov | 5cb1270 | 2015-01-22 12:19:26 +0200 | [diff] [blame] | 601 | IWL_MVM_TDLS_SW_RESP_RCVD, |
Arik Nemtsov | 1d3c3f6 | 2014-10-23 18:03:10 +0300 | [diff] [blame] | 602 | IWL_MVM_TDLS_SW_REQ_RCVD, |
| 603 | IWL_MVM_TDLS_SW_ACTIVE, |
| 604 | }; |
| 605 | |
Liad Kaufman | 04fd2c2 | 2014-12-15 17:54:16 +0200 | [diff] [blame] | 606 | struct iwl_mvm_shared_mem_cfg { |
| 607 | u32 shared_mem_addr; |
| 608 | u32 shared_mem_size; |
| 609 | u32 sample_buff_addr; |
| 610 | u32 sample_buff_size; |
| 611 | u32 txfifo_addr; |
| 612 | u32 txfifo_size[TX_FIFO_MAX_NUM]; |
| 613 | u32 rxfifo_size[RX_FIFO_MAX_NUM]; |
| 614 | u32 page_buff_addr; |
| 615 | u32 page_buff_size; |
Golan Ben-Ami | 5b08641 | 2016-02-09 12:57:16 +0200 | [diff] [blame] | 616 | u32 rxfifo_addr; |
| 617 | u32 internal_txfifo_addr; |
| 618 | u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; |
Liad Kaufman | 04fd2c2 | 2014-12-15 17:54:16 +0200 | [diff] [blame] | 619 | }; |
| 620 | |
Sara Sharon | 10b2b20 | 2016-03-20 16:23:41 +0200 | [diff] [blame] | 621 | /** |
Sara Sharon | b915c10 | 2016-03-23 16:32:02 +0200 | [diff] [blame] | 622 | * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer |
| 623 | * @head_sn: reorder window head sn |
| 624 | * @num_stored: number of mpdus stored in the buffer |
| 625 | * @buf_size: the reorder buffer size as set by the last addba request |
| 626 | * @sta_id: sta id of this reorder buffer |
| 627 | * @queue: queue of this reorder buffer |
| 628 | * @last_amsdu: track last ASMDU SN for duplication detection |
| 629 | * @last_sub_index: track ASMDU sub frame index for duplication detection |
| 630 | * @entries: list of skbs stored |
Sara Sharon | 0690405 | 2016-02-28 20:28:17 +0200 | [diff] [blame] | 631 | * @reorder_time: time the packet was stored in the reorder buffer |
| 632 | * @reorder_timer: timer for frames are in the reorder buffer. For AMSDU |
| 633 | * it is the time of last received sub-frame |
| 634 | * @removed: prevent timer re-arming |
| 635 | * @lock: protect reorder buffer internal state |
| 636 | * @mvm: mvm pointer, needed for frame timer context |
Sara Sharon | b915c10 | 2016-03-23 16:32:02 +0200 | [diff] [blame] | 637 | */ |
| 638 | struct iwl_mvm_reorder_buffer { |
| 639 | u16 head_sn; |
| 640 | u16 num_stored; |
| 641 | u8 buf_size; |
| 642 | u8 sta_id; |
| 643 | int queue; |
| 644 | u16 last_amsdu; |
| 645 | u8 last_sub_index; |
| 646 | struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF]; |
Sara Sharon | 0690405 | 2016-02-28 20:28:17 +0200 | [diff] [blame] | 647 | unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF]; |
| 648 | struct timer_list reorder_timer; |
| 649 | bool removed; |
| 650 | spinlock_t lock; |
| 651 | struct iwl_mvm *mvm; |
Sara Sharon | b915c10 | 2016-03-23 16:32:02 +0200 | [diff] [blame] | 652 | } ____cacheline_aligned_in_smp; |
| 653 | |
| 654 | /** |
Sara Sharon | 10b2b20 | 2016-03-20 16:23:41 +0200 | [diff] [blame] | 655 | * struct iwl_mvm_baid_data - BA session data |
| 656 | * @sta_id: station id |
| 657 | * @tid: tid of the session |
| 658 | * @baid baid of the session |
| 659 | * @timeout: the timeout set in the addba request |
| 660 | * @last_rx: last rx jiffies, updated only if timeout passed from last update |
| 661 | * @session_timer: timer to check if BA session expired, runs at 2 * timeout |
| 662 | * @mvm: mvm pointer, needed for timer context |
Sara Sharon | b915c10 | 2016-03-23 16:32:02 +0200 | [diff] [blame] | 663 | * @reorder_buf: reorder buffer, allocated per queue |
Sara Sharon | 10b2b20 | 2016-03-20 16:23:41 +0200 | [diff] [blame] | 664 | */ |
| 665 | struct iwl_mvm_baid_data { |
| 666 | struct rcu_head rcu_head; |
| 667 | u8 sta_id; |
| 668 | u8 tid; |
| 669 | u8 baid; |
| 670 | u16 timeout; |
| 671 | unsigned long last_rx; |
| 672 | struct timer_list session_timer; |
| 673 | struct iwl_mvm *mvm; |
Sara Sharon | b915c10 | 2016-03-23 16:32:02 +0200 | [diff] [blame] | 674 | struct iwl_mvm_reorder_buffer reorder_buf[]; |
Sara Sharon | 10b2b20 | 2016-03-20 16:23:41 +0200 | [diff] [blame] | 675 | }; |
| 676 | |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 677 | /* |
| 678 | * enum iwl_mvm_queue_status - queue status |
| 679 | * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved |
| 680 | * Basically, this means that this queue can be used for any purpose |
| 681 | * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use |
| 682 | * This is the state of a queue that has been dedicated for some RATID |
| 683 | * (agg'd or not), but that hasn't yet gone through the actual enablement |
| 684 | * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet. |
| 685 | * Note that in this state there is no requirement to already know what TID |
| 686 | * should be used with this queue, it is just marked as a queue that will |
| 687 | * be used, and shouldn't be allocated to anyone else. |
| 688 | * @IWL_MVM_QUEUE_READY: queue is ready to be used |
| 689 | * This is the state of a queue that has been fully configured (including |
| 690 | * SCD pointers, etc), has a specific RA/TID assigned to it, and can be |
| 691 | * used to send traffic. |
Liad Kaufman | 42db09c | 2016-05-02 14:01:14 +0300 | [diff] [blame] | 692 | * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared |
| 693 | * This is a state in which a single queue serves more than one TID, all of |
| 694 | * which are not aggregated. Note that the queue is only associated to one |
| 695 | * RA. |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 696 | * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it |
| 697 | * This is a state of a queue that has had traffic on it, but during the |
| 698 | * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on |
| 699 | * it. In this state, when a new queue is needed to be allocated but no |
| 700 | * such free queue exists, an inactive queue might be freed and given to |
| 701 | * the new RA/TID. |
Liad Kaufman | 9f9af3d | 2015-12-23 16:03:46 +0200 | [diff] [blame] | 702 | * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured |
| 703 | * This is the state of a queue that has had traffic pass through it, but |
| 704 | * needs to be reconfigured for some reason, e.g. the queue needs to |
| 705 | * become unshared and aggregations re-enabled on. |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 706 | */ |
| 707 | enum iwl_mvm_queue_status { |
| 708 | IWL_MVM_QUEUE_FREE, |
| 709 | IWL_MVM_QUEUE_RESERVED, |
| 710 | IWL_MVM_QUEUE_READY, |
Liad Kaufman | 42db09c | 2016-05-02 14:01:14 +0300 | [diff] [blame] | 711 | IWL_MVM_QUEUE_SHARED, |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 712 | IWL_MVM_QUEUE_INACTIVE, |
Liad Kaufman | 9f9af3d | 2015-12-23 16:03:46 +0200 | [diff] [blame] | 713 | IWL_MVM_QUEUE_RECONFIGURING, |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 714 | }; |
| 715 | |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 716 | #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) |
Ayala Beker | 8e160ab | 2016-04-11 11:37:38 +0300 | [diff] [blame] | 717 | #define IWL_MVM_NUM_CIPHERS 10 |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 718 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 719 | struct iwl_mvm { |
| 720 | /* for logger access */ |
| 721 | struct device *dev; |
| 722 | |
| 723 | struct iwl_trans *trans; |
| 724 | const struct iwl_fw *fw; |
| 725 | const struct iwl_cfg *cfg; |
| 726 | struct iwl_phy_db *phy_db; |
| 727 | struct ieee80211_hw *hw; |
| 728 | |
| 729 | /* for protecting access to iwl_mvm */ |
| 730 | struct mutex mutex; |
| 731 | struct list_head async_handlers_list; |
| 732 | spinlock_t async_handlers_lock; |
| 733 | struct work_struct async_handlers_wk; |
| 734 | |
| 735 | struct work_struct roc_done_wk; |
| 736 | |
| 737 | unsigned long status; |
| 738 | |
Sara Sharon | 0636b93 | 2016-02-18 14:21:12 +0200 | [diff] [blame] | 739 | u32 queue_sync_cookie; |
| 740 | atomic_t queue_sync_counter; |
Hila Gonen | 7df15b1 | 2012-12-12 11:16:19 +0200 | [diff] [blame] | 741 | /* |
| 742 | * for beacon filtering - |
| 743 | * currently only one interface can be supported |
| 744 | */ |
| 745 | struct iwl_mvm_vif *bf_allowed_vif; |
| 746 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 747 | enum iwl_ucode_type cur_ucode; |
| 748 | bool ucode_loaded; |
Emmanuel Grumbach | 31b8b34 | 2014-11-02 15:48:09 +0200 | [diff] [blame] | 749 | bool calibrating; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 750 | u32 error_event_table; |
| 751 | u32 log_event_table; |
Eran Harary | 01a9ca5 | 2014-02-03 09:29:57 +0200 | [diff] [blame] | 752 | u32 umac_error_event_table; |
| 753 | bool support_umac_log; |
Eran Harary | 91479b6 | 2014-05-11 08:11:34 +0300 | [diff] [blame] | 754 | struct iwl_sf_region sf_space; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 755 | |
| 756 | u32 ampdu_ref; |
Sara Sharon | fbe4112 | 2016-04-04 19:28:45 +0300 | [diff] [blame] | 757 | bool ampdu_toggle; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 758 | |
| 759 | struct iwl_notif_wait_data notif_wait; |
| 760 | |
Matti Gottlieb | 3848ab6 | 2013-07-30 15:29:37 +0300 | [diff] [blame] | 761 | struct mvm_statistics_rx rx_stats; |
| 762 | |
Johannes Berg | 91a8bcd | 2015-01-14 18:12:41 +0100 | [diff] [blame] | 763 | struct { |
| 764 | u64 rx_time; |
| 765 | u64 tx_time; |
| 766 | u64 on_time_rf; |
| 767 | u64 on_time_scan; |
| 768 | } radio_stats, accu_radio_stats; |
| 769 | |
Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 770 | struct { |
| 771 | /* Map to HW queue */ |
| 772 | u32 hw_queue_to_mac80211; |
| 773 | u8 hw_queue_refcount; |
Liad Kaufman | f02669b | 2016-02-28 16:15:07 +0200 | [diff] [blame] | 774 | u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 775 | bool reserved; /* Is this the TXQ reserved for a STA */ |
Liad Kaufman | 42db09c | 2016-05-02 14:01:14 +0300 | [diff] [blame] | 776 | u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ |
Liad Kaufman | edbe961 | 2016-02-02 15:43:32 +0200 | [diff] [blame] | 777 | u8 txq_tid; /* The TID "owner" of this queue*/ |
Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 778 | u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 779 | /* Timestamp for inactivation per TID of this queue */ |
| 780 | unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 781 | enum iwl_mvm_queue_status status; |
Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 782 | } queue_info[IWL_MAX_HW_QUEUES]; |
| 783 | spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ |
Liad Kaufman | 24afba7 | 2015-07-28 18:56:08 +0300 | [diff] [blame] | 784 | struct work_struct add_stream_wk; /* To add streams to queues */ |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 785 | |
Johannes Berg | df197c0 | 2014-08-01 18:14:45 +0200 | [diff] [blame] | 786 | atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 787 | |
Eran Harary | e02a9d6 | 2014-05-07 12:27:10 +0300 | [diff] [blame] | 788 | const char *nvm_file_name; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 789 | struct iwl_nvm_data *nvm_data; |
Emmanuel Grumbach | b9545b4 | 2013-03-06 11:34:44 +0200 | [diff] [blame] | 790 | /* NVM sections */ |
Eran Harary | ae2b21b | 2014-01-09 08:08:24 +0200 | [diff] [blame] | 791 | struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 792 | |
Matti Gottlieb | a6c4fb4 | 2015-07-15 16:19:29 +0300 | [diff] [blame] | 793 | /* Paging section */ |
| 794 | struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; |
| 795 | u16 num_of_paging_blk; |
| 796 | u16 num_of_pages_in_last_blk; |
| 797 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 798 | /* EEPROM MAC addresses */ |
| 799 | struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; |
| 800 | |
| 801 | /* data related to data path */ |
| 802 | struct iwl_rx_phy_info last_phy_info; |
| 803 | struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; |
| 804 | struct work_struct sta_drained_wk; |
Liad Kaufman | 24afba7 | 2015-07-28 18:56:08 +0300 | [diff] [blame] | 805 | unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 806 | unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; |
Emmanuel Grumbach | e3d4bc8 | 2013-05-07 14:08:24 +0300 | [diff] [blame] | 807 | atomic_t pending_frames[IWL_MVM_STATION_COUNT]; |
Arik Nemtsov | a0f6bf2 | 2014-09-21 19:10:04 +0300 | [diff] [blame] | 808 | u32 tfd_drained[IWL_MVM_STATION_COUNT]; |
Emmanuel Grumbach | 113a044 | 2013-07-02 14:16:38 +0300 | [diff] [blame] | 809 | u8 rx_ba_sessions; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 810 | |
| 811 | /* configured by mac80211 */ |
| 812 | u32 rts_threshold; |
| 813 | |
| 814 | /* Scan status, cmd (pre-allocated) and auxiliary station */ |
Luciano Coelho | 9af91f4 | 2015-02-10 10:42:26 +0200 | [diff] [blame] | 815 | unsigned int scan_status; |
David Spinadel | fb98be5 | 2014-05-04 12:51:10 +0300 | [diff] [blame] | 816 | void *scan_cmd; |
Eliad Peller | e59647e | 2013-11-28 14:08:50 +0200 | [diff] [blame] | 817 | struct iwl_mcast_filter_cmd *mcast_filter_cmd; |
Avraham Stern | 355346b | 2015-11-26 11:22:33 +0200 | [diff] [blame] | 818 | enum iwl_mvm_scan_type scan_type; |
Luca Coelho | a339e91 | 2016-02-02 22:58:46 +0200 | [diff] [blame] | 819 | enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; |
Luca Coelho | 69e0464 | 2016-05-03 12:18:33 +0300 | [diff] [blame] | 820 | struct delayed_work scan_timeout_dwork; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 821 | |
Luciano Coelho | 507e4cd | 2015-03-19 22:58:33 +0200 | [diff] [blame] | 822 | /* max number of simultaneous scans the FW supports */ |
| 823 | unsigned int max_scans; |
| 824 | |
Golan Ben-Ami | a977a15 | 2015-11-25 11:44:57 +0200 | [diff] [blame] | 825 | /* ts of the beginning of a non-collect fw dbg data period */ |
| 826 | unsigned long fw_dbg_non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; |
| 827 | |
David Spinadel | d249622 | 2014-05-20 12:46:37 +0300 | [diff] [blame] | 828 | /* UMAC scan tracking */ |
Luciano Coelho | 6185af2a | 2015-05-07 11:13:24 +0300 | [diff] [blame] | 829 | u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; |
David Spinadel | d249622 | 2014-05-20 12:46:37 +0300 | [diff] [blame] | 830 | |
Oren Givon | 91b05d1 | 2013-08-19 08:36:48 +0300 | [diff] [blame] | 831 | /* rx chain antennas set through debugfs for the scan command */ |
| 832 | u8 scan_rx_ant; |
| 833 | |
Eliad Peller | c87163b | 2014-01-08 10:11:11 +0200 | [diff] [blame] | 834 | #ifdef CONFIG_IWLWIFI_BCAST_FILTERING |
| 835 | /* broadcast filters to configure for each associated station */ |
| 836 | const struct iwl_fw_bcast_filter *bcast_filters; |
Eliad Peller | de06a59 | 2014-01-08 10:11:12 +0200 | [diff] [blame] | 837 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
| 838 | struct { |
Viresh Kumar | 621a5f7 | 2015-09-26 15:04:07 -0700 | [diff] [blame] | 839 | bool override; |
Eliad Peller | de06a59 | 2014-01-08 10:11:12 +0200 | [diff] [blame] | 840 | struct iwl_bcast_filter_cmd cmd; |
| 841 | } dbgfs_bcast_filtering; |
| 842 | #endif |
Eliad Peller | c87163b | 2014-01-08 10:11:11 +0200 | [diff] [blame] | 843 | #endif |
| 844 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 845 | /* Internal station */ |
| 846 | struct iwl_mvm_int_sta aux_sta; |
Chaya Rachel Ivgi | 0e39eb0 | 2015-12-03 15:51:46 +0200 | [diff] [blame] | 847 | struct iwl_mvm_int_sta snif_sta; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 848 | |
Haim Dreyfuss | e820c2d | 2014-04-06 11:19:09 +0300 | [diff] [blame] | 849 | bool last_ebs_successful; |
| 850 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 851 | u8 scan_last_antenna_idx; /* to toggle TX between antennas */ |
| 852 | u8 mgmt_last_antenna_idx; |
| 853 | |
Lilach Edelstein | 1f3b0ff | 2013-10-06 13:03:32 +0200 | [diff] [blame] | 854 | /* last smart fifo state that was successfully sent to firmware */ |
| 855 | enum iwl_sf_state sf_state; |
| 856 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 857 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
| 858 | struct dentry *debugfs_dir; |
| 859 | u32 dbgfs_sram_offset, dbgfs_sram_len; |
Eliad Peller | f3c221f | 2014-01-09 13:12:54 +0200 | [diff] [blame] | 860 | u32 dbgfs_prph_reg_addr; |
Alexander Bondar | 64b928c | 2013-09-03 14:18:03 +0300 | [diff] [blame] | 861 | bool disable_power_off; |
| 862 | bool disable_power_off_d3; |
Emmanuel Grumbach | 086f736 | 2013-11-18 17:00:03 +0200 | [diff] [blame] | 863 | |
Viresh Kumar | 621a5f7 | 2015-09-26 15:04:07 -0700 | [diff] [blame] | 864 | bool scan_iter_notif_enabled; |
Alexander Bondar | e5d7464 | 2014-12-09 19:15:49 +0200 | [diff] [blame] | 865 | |
Emmanuel Grumbach | 086f736 | 2013-11-18 17:00:03 +0200 | [diff] [blame] | 866 | struct debugfs_blob_wrapper nvm_hw_blob; |
| 867 | struct debugfs_blob_wrapper nvm_sw_blob; |
| 868 | struct debugfs_blob_wrapper nvm_calib_blob; |
| 869 | struct debugfs_blob_wrapper nvm_prod_blob; |
Moshe Harel | 91fac94 | 2015-09-02 12:45:12 +0300 | [diff] [blame] | 870 | struct debugfs_blob_wrapper nvm_phy_sku_blob; |
Eyal Shapira | 5fc0f76 | 2014-01-28 01:35:32 +0200 | [diff] [blame] | 871 | |
| 872 | struct iwl_mvm_frame_stats drv_rx_stats; |
| 873 | spinlock_t drv_stats_lock; |
Emmanuel Grumbach | ddf89ab | 2015-02-08 10:56:43 +0200 | [diff] [blame] | 874 | u16 dbgfs_rx_phyinfo; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 875 | #endif |
| 876 | |
Ilan Peer | fe0f2de | 2013-03-21 10:23:52 +0200 | [diff] [blame] | 877 | struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 878 | |
| 879 | struct list_head time_event_list; |
| 880 | spinlock_t time_event_lock; |
| 881 | |
| 882 | /* |
| 883 | * A bitmap indicating the index of the key in use. The firmware |
| 884 | * can hold 16 keys at most. Reflect this fact. |
| 885 | */ |
| 886 | unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; |
Johannes Berg | 2dc2a15 | 2015-06-16 17:09:18 +0200 | [diff] [blame] | 887 | u8 fw_key_deleted[STA_KEY_MAX_NUM]; |
Alexander Bondar | 5ee2b21 | 2013-03-05 10:16:40 +0200 | [diff] [blame] | 888 | |
Eliad Peller | 576eeee | 2014-07-01 18:38:38 +0300 | [diff] [blame] | 889 | /* references taken by the driver and spinlock protecting them */ |
| 890 | spinlock_t refs_lock; |
| 891 | u8 refs[IWL_MVM_REF_COUNT]; |
Eliad Peller | 7498cf4 | 2014-01-16 17:10:44 +0200 | [diff] [blame] | 892 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 893 | u8 vif_count; |
| 894 | |
Eran Harary | 291aa7c | 2013-07-03 11:00:06 +0300 | [diff] [blame] | 895 | /* -1 for always, 0 for never, >0 for that many times */ |
| 896 | s8 restart_fw; |
Emmanuel Grumbach | d2709ad | 2015-01-29 14:58:06 +0200 | [diff] [blame] | 897 | u8 fw_dbg_conf; |
| 898 | struct delayed_work fw_dump_wk; |
Emmanuel Grumbach | a80c7a6 | 2016-01-05 09:14:08 +0200 | [diff] [blame] | 899 | const struct iwl_mvm_dump_desc *fw_dump_desc; |
| 900 | const struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; |
Eran Harary | 291aa7c | 2013-07-03 11:00:06 +0300 | [diff] [blame] | 901 | |
Johannes Berg | c43e933 | 2014-04-24 16:31:08 +0200 | [diff] [blame] | 902 | #ifdef CONFIG_IWLWIFI_LEDS |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 903 | struct led_classdev led; |
Johannes Berg | c43e933 | 2014-04-24 16:31:08 +0200 | [diff] [blame] | 904 | #endif |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 905 | |
| 906 | struct ieee80211_vif *p2p_device_vif; |
Johannes Berg | f444eb1 | 2013-02-26 12:04:18 +0100 | [diff] [blame] | 907 | |
Eliad Peller | a3f7ba5 | 2015-11-11 17:23:59 +0200 | [diff] [blame] | 908 | #ifdef CONFIG_PM |
Johannes Berg | 964dc9e | 2013-06-03 17:25:34 +0200 | [diff] [blame] | 909 | struct wiphy_wowlan_support wowlan; |
Johannes Berg | f444eb1 | 2013-02-26 12:04:18 +0100 | [diff] [blame] | 910 | int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; |
Luciano Coelho | 3c2f3b2 | 2014-09-24 08:29:11 +0300 | [diff] [blame] | 911 | |
| 912 | /* sched scan settings for net detect */ |
Luciano Coelho | cc4c1ab | 2014-11-10 23:21:55 +0200 | [diff] [blame] | 913 | struct ieee80211_scan_ies nd_ies; |
Luciano Coelho | d9718da | 2014-11-21 09:32:23 +0200 | [diff] [blame] | 914 | struct cfg80211_match_set *nd_match_sets; |
| 915 | int n_nd_match_sets; |
Luciano Coelho | 8ed4e65 | 2014-11-21 22:08:01 +0200 | [diff] [blame] | 916 | struct ieee80211_channel **nd_channels; |
| 917 | int n_nd_channels; |
Luciano Coelho | 2021a89 | 2014-11-20 08:59:51 +0200 | [diff] [blame] | 918 | bool net_detect; |
Johannes Berg | afc66bb | 2013-05-03 11:44:16 +0200 | [diff] [blame] | 919 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
Viresh Kumar | 621a5f7 | 2015-09-26 15:04:07 -0700 | [diff] [blame] | 920 | bool d3_wake_sysassert; |
Johannes Berg | debff61 | 2013-05-14 13:53:45 +0200 | [diff] [blame] | 921 | bool d3_test_active; |
Johannes Berg | afc66bb | 2013-05-03 11:44:16 +0200 | [diff] [blame] | 922 | bool store_d3_resume_sram; |
| 923 | void *d3_resume_sram; |
Johannes Berg | debff61 | 2013-05-14 13:53:45 +0200 | [diff] [blame] | 924 | u32 d3_test_pme_ptr; |
Eliad Peller | 78c9df6 | 2013-11-07 14:13:30 +0200 | [diff] [blame] | 925 | struct ieee80211_vif *keep_vif; |
Luciano Coelho | 484b3d1 | 2015-03-30 20:46:32 +0300 | [diff] [blame] | 926 | u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */ |
Johannes Berg | afc66bb | 2013-05-03 11:44:16 +0200 | [diff] [blame] | 927 | #endif |
Johannes Berg | f444eb1 | 2013-02-26 12:04:18 +0100 | [diff] [blame] | 928 | #endif |
Emmanuel Grumbach | f421f9c | 2013-01-17 14:20:29 +0200 | [diff] [blame] | 929 | |
Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 930 | /* d0i3 */ |
| 931 | u8 d0i3_ap_sta_id; |
Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 932 | bool d0i3_offloading; |
Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 933 | struct work_struct d0i3_exit_work; |
Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 934 | struct sk_buff_head d0i3_tx; |
Eliad Peller | d15a747 | 2014-03-27 18:53:12 +0200 | [diff] [blame] | 935 | /* protect d0i3_suspend_flags */ |
| 936 | struct mutex d0i3_suspend_mutex; |
| 937 | unsigned long d0i3_suspend_flags; |
Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 938 | /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ |
| 939 | spinlock_t d0i3_tx_lock; |
| 940 | wait_queue_head_t d0i3_exit_waitq; |
Eliad Peller | 37577fe | 2013-12-05 17:19:39 +0200 | [diff] [blame] | 941 | |
Emmanuel Grumbach | f421f9c | 2013-01-17 14:20:29 +0200 | [diff] [blame] | 942 | /* BT-Coex */ |
Emmanuel Grumbach | 430a3bb | 2014-04-02 09:55:16 +0300 | [diff] [blame] | 943 | struct iwl_bt_coex_profile_notif last_bt_notif; |
| 944 | struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; |
Emmanuel Grumbach | 0ea8d04 | 2014-04-02 09:31:36 +0300 | [diff] [blame] | 945 | |
Emmanuel Grumbach | b9fae2d | 2014-02-17 11:24:10 +0200 | [diff] [blame] | 946 | u32 last_ant_isol; |
| 947 | u8 last_corun_lut; |
Emmanuel Grumbach | cdb0056 | 2014-03-16 21:55:43 +0200 | [diff] [blame] | 948 | u8 bt_tx_prio; |
Emmanuel Grumbach | a39979a | 2014-05-28 12:06:41 +0300 | [diff] [blame] | 949 | enum iwl_bt_force_ant_mode bt_force_ant_mode; |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 950 | |
Ariej Marjieh | b112889 | 2014-07-16 21:11:12 +0300 | [diff] [blame] | 951 | /* Aux ROC */ |
| 952 | struct list_head aux_roc_te_list; |
| 953 | |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 954 | /* Thermal Throttling and CTkill */ |
| 955 | struct iwl_mvm_tt_mgmt thermal_throttle; |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 956 | #ifdef CONFIG_THERMAL |
| 957 | struct iwl_mvm_thermal_device tz_device; |
Chaya Rachel Ivgi | 5c89e7b | 2016-01-05 10:34:47 +0200 | [diff] [blame] | 958 | struct iwl_mvm_cooling_device cooling_dev; |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 959 | #endif |
| 960 | |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 961 | s32 temperature; /* Celsius */ |
Matti Gottlieb | 7280d1f | 2014-07-17 16:41:14 +0300 | [diff] [blame] | 962 | /* |
| 963 | * Debug option to set the NIC temperature. This option makes the |
| 964 | * driver think this is the actual NIC temperature, and ignore the |
| 965 | * real temperature that is received from the fw |
| 966 | */ |
| 967 | bool temperature_test; /* Debug test temperature is enabled */ |
Alexander Bondar | e811ada | 2013-03-10 15:29:44 +0200 | [diff] [blame] | 968 | |
Johannes Berg | b2b7875 | 2014-09-08 16:42:54 +0200 | [diff] [blame] | 969 | struct iwl_time_quota_cmd last_quota_cmd; |
| 970 | |
David Spinadel | 507cadf | 2013-07-31 18:07:21 +0300 | [diff] [blame] | 971 | #ifdef CONFIG_NL80211_TESTMODE |
| 972 | u32 noa_duration; |
| 973 | struct ieee80211_vif *noa_vif; |
| 974 | #endif |
Eytan Lifshitz | 19e737c | 2013-09-09 13:30:15 +0200 | [diff] [blame] | 975 | |
| 976 | /* Tx queues */ |
| 977 | u8 aux_queue; |
| 978 | u8 first_agg_queue; |
| 979 | u8 last_agg_queue; |
Alexander Bondar | 1c2abf7 | 2013-08-27 20:31:48 +0300 | [diff] [blame] | 980 | |
Alexander Bondar | 92d8556 | 2013-10-23 11:50:34 +0200 | [diff] [blame] | 981 | /* Indicate if device power save is allowed */ |
Luciano Coelho | bdd5483 | 2014-08-07 18:08:56 +0300 | [diff] [blame] | 982 | u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ |
Emmanuel Grumbach | 9e7dce2 | 2015-10-26 16:14:06 +0200 | [diff] [blame] | 983 | unsigned int max_amsdu_len; /* used for debugfs only */ |
Andrei Otcheretianski | bd3398e | 2013-10-22 05:01:12 +0200 | [diff] [blame] | 984 | |
Andrei Otcheretianski | 664322f | 2014-06-05 16:40:36 +0300 | [diff] [blame] | 985 | struct ieee80211_vif __rcu *csa_vif; |
Andrei Otcheretianski | 003e5236 | 2014-05-25 17:24:22 +0300 | [diff] [blame] | 986 | struct ieee80211_vif __rcu *csa_tx_blocked_vif; |
| 987 | u8 csa_tx_block_bcn_timeout; |
David Spinadel | 1c87bba | 2014-02-27 16:41:52 +0200 | [diff] [blame] | 988 | |
| 989 | /* system time of last beacon (for AP/GO interface) */ |
| 990 | u32 ap_last_beacon_gp2; |
Eyal Shapira | 9ecd051 | 2014-08-28 02:21:05 +0300 | [diff] [blame] | 991 | |
Arik Nemtsov | 88931cc | 2014-03-05 12:26:15 +0200 | [diff] [blame] | 992 | bool lar_regdom_set; |
Eran Harary | 8ba2d7a | 2015-02-08 11:41:43 +0200 | [diff] [blame] | 993 | enum iwl_mcc_source mcc_src; |
Arik Nemtsov | 88931cc | 2014-03-05 12:26:15 +0200 | [diff] [blame] | 994 | |
Arik Nemtsov | 1d3c3f6 | 2014-10-23 18:03:10 +0300 | [diff] [blame] | 995 | /* TDLS channel switch data */ |
| 996 | struct { |
| 997 | struct delayed_work dwork; |
| 998 | enum iwl_mvm_tdls_cs_state state; |
| 999 | |
| 1000 | /* |
| 1001 | * Current cs sta - might be different from periodic cs peer |
| 1002 | * station. Value is meaningless when the cs-state is idle. |
| 1003 | */ |
| 1004 | u8 cur_sta_id; |
| 1005 | |
| 1006 | /* TDLS periodic channel-switch peer */ |
| 1007 | struct { |
| 1008 | u8 sta_id; |
| 1009 | u8 op_class; |
| 1010 | bool initiator; /* are we the link initiator */ |
| 1011 | struct cfg80211_chan_def chandef; |
| 1012 | struct sk_buff *skb; /* ch sw template */ |
| 1013 | u32 ch_sw_tm_ie; |
Arik Nemtsov | b9dccdb | 2015-01-22 14:24:44 +0200 | [diff] [blame] | 1014 | |
| 1015 | /* timestamp of last ch-sw request sent (GP2 time) */ |
| 1016 | u32 sent_timestamp; |
Arik Nemtsov | 1d3c3f6 | 2014-10-23 18:03:10 +0300 | [diff] [blame] | 1017 | } peer; |
| 1018 | } tdls_cs; |
Liad Kaufman | 04fd2c2 | 2014-12-15 17:54:16 +0200 | [diff] [blame] | 1019 | |
| 1020 | struct iwl_mvm_shared_mem_cfg shared_mem_cfg; |
Johannes Berg | 5f4c02e | 2015-05-20 16:51:28 +0200 | [diff] [blame] | 1021 | |
Ayala Beker | 2a53d16 | 2016-04-07 16:21:57 +0300 | [diff] [blame] | 1022 | u32 ciphers[IWL_MVM_NUM_CIPHERS]; |
Johannes Berg | 24ddddf | 2016-06-21 12:34:36 +0200 | [diff] [blame] | 1023 | struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; |
Gregory Greenman | ce79291 | 2015-06-02 18:06:16 +0300 | [diff] [blame] | 1024 | struct iwl_mvm_tof_data tof_data; |
Andrei Otcheretianski | c89e333 | 2016-01-26 18:12:28 +0200 | [diff] [blame] | 1025 | |
Sara Sharon | 10b2b20 | 2016-03-20 16:23:41 +0200 | [diff] [blame] | 1026 | struct ieee80211_vif *nan_vif; |
| 1027 | #define IWL_MAX_BAID 32 |
| 1028 | struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID]; |
| 1029 | |
Andrei Otcheretianski | c89e333 | 2016-01-26 18:12:28 +0200 | [diff] [blame] | 1030 | /* |
| 1031 | * Drop beacons from other APs in AP mode when there are no connected |
| 1032 | * clients. |
| 1033 | */ |
| 1034 | bool drop_bcn_ap_mode; |
Andrei Otcheretianski | d3a108a | 2016-02-28 17:12:21 +0200 | [diff] [blame] | 1035 | |
| 1036 | struct delayed_work cs_tx_unblock_dwork; |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1037 | }; |
| 1038 | |
/* Extract MVM priv from op_mode and _hw */

/* The op_mode's opaque op_mode_specific area holds our struct iwl_mvm */
#define IWL_OP_MODE_GET_MVM(_iwl_op_mode)	\
	((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)

/* mac80211's hw->priv stores the iwl_op_mode; chain through it to the mvm */
#define IWL_MAC80211_GET_MVM(_hw)	\
	IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
| 1045 | |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1046 | enum iwl_mvm_status { |
| 1047 | IWL_MVM_STATUS_HW_RFKILL, |
| 1048 | IWL_MVM_STATUS_HW_CTKILL, |
| 1049 | IWL_MVM_STATUS_ROC_RUNNING, |
| 1050 | IWL_MVM_STATUS_IN_HW_RESTART, |
Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 1051 | IWL_MVM_STATUS_IN_D0I3, |
Ariej Marjieh | b112889 | 2014-07-16 21:11:12 +0300 | [diff] [blame] | 1052 | IWL_MVM_STATUS_ROC_AUX_RUNNING, |
Johannes Berg | 58629d9 | 2014-11-06 09:40:50 +0100 | [diff] [blame] | 1053 | IWL_MVM_STATUS_D3_RECONFIG, |
Emmanuel Grumbach | d2709ad | 2015-01-29 14:58:06 +0200 | [diff] [blame] | 1054 | IWL_MVM_STATUS_DUMPING_FW_LOG, |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1055 | }; |
| 1056 | |
| 1057 | static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) |
| 1058 | { |
| 1059 | return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || |
| 1060 | test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); |
| 1061 | } |
| 1062 | |
Arik Nemtsov | 1a3fe0b | 2015-09-30 11:19:55 +0300 | [diff] [blame] | 1063 | static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) |
| 1064 | { |
| 1065 | return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); |
| 1066 | } |
| 1067 | |
Luciano Coelho | dc88b4b | 2014-11-10 11:10:14 +0200 | [diff] [blame] | 1068 | /* Must be called with rcu_read_lock() held and it can only be |
| 1069 | * released when mvmsta is not needed anymore. |
| 1070 | */ |
| 1071 | static inline struct iwl_mvm_sta * |
| 1072 | iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id) |
| 1073 | { |
| 1074 | struct ieee80211_sta *sta; |
| 1075 | |
| 1076 | if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) |
| 1077 | return NULL; |
| 1078 | |
| 1079 | sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); |
| 1080 | |
| 1081 | /* This can happen if the station has been removed right now */ |
| 1082 | if (IS_ERR_OR_NULL(sta)) |
| 1083 | return NULL; |
| 1084 | |
| 1085 | return iwl_mvm_sta_from_mac80211(sta); |
| 1086 | } |
| 1087 | |
Emmanuel Grumbach | f327b04 | 2014-01-14 08:30:32 +0200 | [diff] [blame] | 1088 | static inline struct iwl_mvm_sta * |
| 1089 | iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) |
| 1090 | { |
| 1091 | struct ieee80211_sta *sta; |
| 1092 | |
| 1093 | if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) |
| 1094 | return NULL; |
| 1095 | |
| 1096 | sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], |
| 1097 | lockdep_is_held(&mvm->mutex)); |
| 1098 | |
| 1099 | /* This can happen if the station has been removed right now */ |
| 1100 | if (IS_ERR_OR_NULL(sta)) |
| 1101 | return NULL; |
| 1102 | |
| 1103 | return iwl_mvm_sta_from_mac80211(sta); |
| 1104 | } |
| 1105 | |
Eliad Peller | 7bb426e | 2014-02-24 12:54:37 +0200 | [diff] [blame] | 1106 | static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) |
| 1107 | { |
Luca Coelho | 6f73066 | 2015-10-26 13:43:12 +0200 | [diff] [blame] | 1108 | return !iwlwifi_mod_params.d0i3_disable && |
Luca Coelho | 5053e29 | 2015-09-17 21:55:24 +0300 | [diff] [blame] | 1109 | fw_has_capa(&mvm->fw->ucode_capa, |
| 1110 | IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); |
Eliad Peller | 7bb426e | 2014-02-24 12:54:37 +0200 | [diff] [blame] | 1111 | } |
| 1112 | |
Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1113 | static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) |
| 1114 | { |
Liad Kaufman | 280452c | 2016-02-23 16:23:38 +0200 | [diff] [blame] | 1115 | /* Make sure DQA isn't allowed in driver until feature is complete */ |
| 1116 | return false && fw_has_capa(&mvm->fw->ucode_capa, |
| 1117 | IWL_UCODE_TLV_CAPA_DQA_SUPPORT); |
Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1118 | } |
| 1119 | |
Luca Coelho | b728264 | 2015-09-17 23:44:14 +0300 | [diff] [blame] | 1120 | static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) |
| 1121 | { |
| 1122 | /* For now we only use this mode to differentiate between |
| 1123 | * slave transports, which handle D0i3 entry in suspend by |
| 1124 | * themselves in conjunction with runtime PM D0i3. So, this |
| 1125 | * function is used to check whether we need to do anything |
| 1126 | * when entering suspend or if the transport layer has already |
| 1127 | * done it. |
| 1128 | */ |
| 1129 | return (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) && |
| 1130 | (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3); |
| 1131 | } |
| 1132 | |
Liad Kaufman | 9f9af3d | 2015-12-23 16:03:46 +0200 | [diff] [blame] | 1133 | static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue) |
| 1134 | { |
| 1135 | return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) && |
| 1136 | (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE); |
| 1137 | } |
| 1138 | |
| 1139 | static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue) |
| 1140 | { |
| 1141 | return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) && |
| 1142 | (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE); |
| 1143 | } |
| 1144 | |
Arik Nemtsov | dcaf9f5 | 2014-03-04 19:54:12 +0200 | [diff] [blame] | 1145 | static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) |
| 1146 | { |
Matti Gottlieb | d0d1519 | 2014-07-31 09:16:25 +0300 | [diff] [blame] | 1147 | bool nvm_lar = mvm->nvm_data->lar_enabled; |
Johannes Berg | 859d914 | 2015-06-01 17:11:11 +0200 | [diff] [blame] | 1148 | bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, |
| 1149 | IWL_UCODE_TLV_CAPA_LAR_SUPPORT); |
Arik Nemtsov | 5711cac | 2014-12-28 09:23:16 +0200 | [diff] [blame] | 1150 | |
| 1151 | if (iwlwifi_mod_params.lar_disable) |
| 1152 | return false; |
| 1153 | |
Matti Gottlieb | d0d1519 | 2014-07-31 09:16:25 +0300 | [diff] [blame] | 1154 | /* |
| 1155 | * Enable LAR only if it is supported by the FW (TLV) && |
| 1156 | * enabled in the NVM |
| 1157 | */ |
| 1158 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) |
| 1159 | return nvm_lar && tlv_lar; |
| 1160 | else |
| 1161 | return tlv_lar; |
Arik Nemtsov | dcaf9f5 | 2014-03-04 19:54:12 +0200 | [diff] [blame] | 1162 | } |
| 1163 | |
Eran Harary | 8ba2d7a | 2015-02-08 11:41:43 +0200 | [diff] [blame] | 1164 | static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) |
| 1165 | { |
Johannes Berg | 859d914 | 2015-06-01 17:11:11 +0200 | [diff] [blame] | 1166 | return fw_has_api(&mvm->fw->ucode_capa, |
| 1167 | IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || |
| 1168 | fw_has_capa(&mvm->fw->ucode_capa, |
| 1169 | IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); |
Eran Harary | 8ba2d7a | 2015-02-08 11:41:43 +0200 | [diff] [blame] | 1170 | } |
| 1171 | |
Emmanuel Grumbach | 0522588 | 2015-02-12 12:33:09 +0200 | [diff] [blame] | 1172 | static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) |
| 1173 | { |
Johannes Berg | 859d914 | 2015-06-01 17:11:11 +0200 | [diff] [blame] | 1174 | return fw_has_capa(&mvm->fw->ucode_capa, |
| 1175 | IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && |
Emmanuel Grumbach | 0522588 | 2015-02-12 12:33:09 +0200 | [diff] [blame] | 1176 | IWL_MVM_BT_COEX_CORUNNING; |
| 1177 | } |
| 1178 | |
Emmanuel Grumbach | 70e9099 | 2015-02-26 16:54:24 +0200 | [diff] [blame] | 1179 | static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) |
| 1180 | { |
Johannes Berg | 859d914 | 2015-06-01 17:11:11 +0200 | [diff] [blame] | 1181 | return fw_has_capa(&mvm->fw->ucode_capa, |
| 1182 | IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && |
Emmanuel Grumbach | 70e9099 | 2015-02-26 16:54:24 +0200 | [diff] [blame] | 1183 | IWL_MVM_BT_COEX_RRC; |
| 1184 | } |
| 1185 | |
Avri Altman | 93190fb | 2014-12-27 09:09:47 +0200 | [diff] [blame] | 1186 | static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) |
| 1187 | { |
| 1188 | return fw_has_capa(&mvm->fw->ucode_capa, |
Sara Sharon | e9eb5e3 | 2016-04-03 10:19:16 +0300 | [diff] [blame] | 1189 | IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && |
| 1190 | !IWL_MVM_HW_CSUM_DISABLE; |
Avri Altman | 93190fb | 2014-12-27 09:09:47 +0200 | [diff] [blame] | 1191 | } |
| 1192 | |
Avri Altman | e7c2e1f | 2015-10-29 16:50:57 +0200 | [diff] [blame] | 1193 | static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) |
| 1194 | { |
| 1195 | return fw_has_capa(&mvm->fw->ucode_capa, |
| 1196 | IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) && |
| 1197 | IWL_MVM_BT_COEX_MPLUT; |
| 1198 | } |
| 1199 | |
Avri Altman | ee95ed3 | 2015-11-25 13:17:10 +0200 | [diff] [blame] | 1200 | static inline |
Avraham Stern | c5241b0 | 2016-04-20 09:29:18 +0300 | [diff] [blame] | 1201 | bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm) |
Avri Altman | ee95ed3 | 2015-11-25 13:17:10 +0200 | [diff] [blame] | 1202 | { |
| 1203 | return fw_has_capa(&mvm->fw->ucode_capa, |
Avraham Stern | c5241b0 | 2016-04-20 09:29:18 +0300 | [diff] [blame] | 1204 | IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) && |
Emmanuel Grumbach | 11dee0b | 2016-03-15 11:04:29 +0200 | [diff] [blame] | 1205 | !(iwlwifi_mod_params.uapsd_disable & |
| 1206 | IWL_DISABLE_UAPSD_P2P_CLIENT); |
Avri Altman | ee95ed3 | 2015-11-25 13:17:10 +0200 | [diff] [blame] | 1207 | } |
| 1208 | |
Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1209 | static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) |
| 1210 | { |
Sara Sharon | 81f02ba | 2015-12-30 23:58:29 +0200 | [diff] [blame] | 1211 | return fw_has_capa(&mvm->fw->ucode_capa, |
| 1212 | IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); |
Johannes Berg | 0316d30 | 2015-05-22 13:41:07 +0200 | [diff] [blame] | 1213 | } |
| 1214 | |
Sara Sharon | d975d72 | 2016-07-04 11:52:07 +0300 | [diff] [blame] | 1215 | static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) |
| 1216 | { |
| 1217 | /* TODO - replace with TLV once defined */ |
| 1218 | return mvm->trans->cfg->use_tfh; |
| 1219 | } |
| 1220 | |
Chaya Rachel Ivgi | 0a3b711 | 2015-12-16 16:34:55 +0200 | [diff] [blame] | 1221 | static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) |
| 1222 | { |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 1223 | #ifdef CONFIG_THERMAL |
Chaya Rachel Ivgi | 0a3b711 | 2015-12-16 16:34:55 +0200 | [diff] [blame] | 1224 | /* these two TLV are redundant since the responsibility to CT-kill by |
| 1225 | * FW happens only after we send at least one command of |
| 1226 | * temperature THs report. |
| 1227 | */ |
| 1228 | return fw_has_capa(&mvm->fw->ucode_capa, |
| 1229 | IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && |
| 1230 | fw_has_capa(&mvm->fw->ucode_capa, |
| 1231 | IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 1232 | #else /* CONFIG_THERMAL */ |
| 1233 | return false; |
| 1234 | #endif /* CONFIG_THERMAL */ |
Chaya Rachel Ivgi | 0a3b711 | 2015-12-16 16:34:55 +0200 | [diff] [blame] | 1235 | } |
| 1236 | |
Chaya Rachel Ivgi | 5c89e7b | 2016-01-05 10:34:47 +0200 | [diff] [blame] | 1237 | static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) |
| 1238 | { |
| 1239 | return fw_has_capa(&mvm->fw->ucode_capa, |
| 1240 | IWL_UCODE_TLV_CAPA_CTDP_SUPPORT); |
| 1241 | } |
| 1242 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1243 | extern const u8 iwl_mvm_ac_to_tx_fifo[]; |
| 1244 | |
/*
 * struct iwl_rate_info - per-rate mapping between the uCode PLCP values
 * (legacy/SISO/MIMO2/MIMO3) and the IEEE rate value used in MAC headers.
 */
struct iwl_rate_info {
	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
	u8 plcp_siso;	/* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
	u8 plcp_mimo2;	/* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
	u8 plcp_mimo3;	/* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
	u8 ieee;	/* MAC header:  IWL_RATE_6M_IEEE, etc. */
};
| 1252 | |
Luciano Coelho | a0a0924 | 2014-09-04 12:29:15 +0300 | [diff] [blame] | 1253 | void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); |
| 1254 | int __iwl_mvm_mac_start(struct iwl_mvm *mvm); |
| 1255 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1256 | /****************** |
| 1257 | * MVM Methods |
| 1258 | ******************/ |
| 1259 | /* uCode */ |
| 1260 | int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); |
| 1261 | |
| 1262 | /* Utils */ |
| 1263 | int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, |
Johannes Berg | 57fbcce | 2016-04-12 15:56:15 +0200 | [diff] [blame] | 1264 | enum nl80211_band band); |
Eyal Shapira | d310e40 | 2013-08-11 18:43:47 +0300 | [diff] [blame] | 1265 | void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, |
Johannes Berg | 57fbcce | 2016-04-12 15:56:15 +0200 | [diff] [blame] | 1266 | enum nl80211_band band, |
Eyal Shapira | d310e40 | 2013-08-11 18:43:47 +0300 | [diff] [blame] | 1267 | struct ieee80211_tx_rate *r); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1268 | u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); |
| 1269 | void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); |
| 1270 | u8 first_antenna(u8 mask); |
| 1271 | u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); |
| 1272 | |
| 1273 | /* Tx / Host Commands */ |
| 1274 | int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, |
| 1275 | struct iwl_host_cmd *cmd); |
Aviya Erenfeld | ab02165 | 2015-06-09 16:45:52 +0300 | [diff] [blame] | 1276 | int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1277 | u32 flags, u16 len, const void *data); |
| 1278 | int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, |
| 1279 | struct iwl_host_cmd *cmd, |
| 1280 | u32 *status); |
Aviya Erenfeld | ab02165 | 2015-06-09 16:45:52 +0300 | [diff] [blame] | 1281 | int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1282 | u16 len, const void *data, |
| 1283 | u32 *status); |
| 1284 | int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, |
| 1285 | struct ieee80211_sta *sta); |
| 1286 | int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); |
Arik Nemtsov | 6ce73e6 | 2014-09-11 13:00:19 +0300 | [diff] [blame] | 1287 | void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, |
| 1288 | struct iwl_tx_cmd *tx_cmd, |
| 1289 | struct ieee80211_tx_info *info, u8 sta_id); |
Arik Nemtsov | 6ce73e6 | 2014-09-11 13:00:19 +0300 | [diff] [blame] | 1290 | void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, |
| 1291 | struct ieee80211_tx_info *info, |
| 1292 | struct ieee80211_sta *sta, __le16 fc); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1293 | #ifdef CONFIG_IWLWIFI_DEBUG |
| 1294 | const char *iwl_mvm_get_tx_fail_reason(u32 status); |
| 1295 | #else |
| 1296 | static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } |
| 1297 | #endif |
Luca Coelho | 5888a40 | 2015-10-06 09:54:57 +0300 | [diff] [blame] | 1298 | int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1299 | void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); |
| 1300 | |
Johannes Berg | ca8c0f4 | 2015-04-20 17:54:54 +0200 | [diff] [blame] | 1301 | static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, |
| 1302 | struct iwl_tx_cmd *tx_cmd) |
| 1303 | { |
| 1304 | struct ieee80211_key_conf *keyconf = info->control.hw_key; |
| 1305 | |
| 1306 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; |
| 1307 | memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); |
| 1308 | if (info->flags & IEEE80211_TX_CTL_AMPDU) |
| 1309 | tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); |
| 1310 | } |
| 1311 | |
Arik Nemtsov | 33ea27f | 2014-02-10 15:34:29 +0200 | [diff] [blame] | 1312 | static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) |
| 1313 | { |
| 1314 | flush_work(&mvm->async_handlers_wk); |
| 1315 | } |
| 1316 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1317 | /* Statistics */ |
Johannes Berg | 91a8bcd | 2015-01-14 18:12:41 +0100 | [diff] [blame] | 1318 | void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, |
| 1319 | struct iwl_rx_packet *pkt); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1320 | void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, |
| 1321 | struct iwl_rx_cmd_buffer *rxb); |
Johannes Berg | 33cef92 | 2015-01-21 21:41:29 +0100 | [diff] [blame] | 1322 | int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); |
Johannes Berg | 91a8bcd | 2015-01-14 18:12:41 +0100 | [diff] [blame] | 1323 | void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1324 | |
| 1325 | /* NVM */ |
Eran Harary | 14b485f | 2014-04-23 10:46:09 +0300 | [diff] [blame] | 1326 | int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic); |
Eytan Lifshitz | 81a67e3 | 2013-09-11 12:39:18 +0200 | [diff] [blame] | 1327 | int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1328 | |
Moshe Harel | a054427 | 2014-12-08 21:13:14 +0200 | [diff] [blame] | 1329 | static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) |
| 1330 | { |
| 1331 | return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? |
| 1332 | mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : |
| 1333 | mvm->fw->valid_tx_ant; |
| 1334 | } |
| 1335 | |
| 1336 | static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) |
| 1337 | { |
| 1338 | return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? |
| 1339 | mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : |
| 1340 | mvm->fw->valid_rx_ant; |
| 1341 | } |
| 1342 | |
| 1343 | static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm) |
| 1344 | { |
| 1345 | u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | |
| 1346 | FW_PHY_CFG_RX_CHAIN); |
| 1347 | u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm); |
| 1348 | u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); |
| 1349 | |
| 1350 | phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | |
| 1351 | valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS; |
| 1352 | |
| 1353 | return mvm->fw->phy_config & phy_config; |
| 1354 | } |
| 1355 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1356 | int iwl_mvm_up(struct iwl_mvm *mvm); |
| 1357 | int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); |
| 1358 | |
| 1359 | int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); |
Eliad Peller | de06a59 | 2014-01-08 10:11:12 +0200 | [diff] [blame] | 1360 | bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, |
| 1361 | struct iwl_bcast_filter_cmd *cmd); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1362 | |
| 1363 | /* |
| 1364 | * FW notifications / CMD responses handlers |
| 1365 | * Convention: iwl_mvm_rx_<NAME OF THE CMD> |
| 1366 | */ |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1367 | void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
Johannes Berg | 1be5d8c | 2015-06-11 16:51:24 +0200 | [diff] [blame] | 1368 | void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, |
| 1369 | struct iwl_rx_cmd_buffer *rxb); |
Johannes Berg | 780e87c | 2015-09-03 14:56:10 +0200 | [diff] [blame] | 1370 | void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, |
| 1371 | struct iwl_rx_cmd_buffer *rxb, int queue); |
Sara Sharon | a338384 | 2016-02-28 15:41:47 +0200 | [diff] [blame] | 1372 | void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, |
Sara Sharon | 585a6fc | 2015-12-01 13:48:18 +0200 | [diff] [blame] | 1373 | struct iwl_rx_cmd_buffer *rxb, int queue); |
Sara Sharon | 94bb448 | 2015-12-16 18:48:28 +0200 | [diff] [blame] | 1374 | int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, |
| 1375 | const u8 *data, u32 count); |
| 1376 | void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, |
| 1377 | int queue); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1378 | void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
| 1379 | void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
| 1380 | void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, |
| 1381 | struct iwl_rx_cmd_buffer *rxb); |
| 1382 | void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
| 1383 | void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, |
| 1384 | struct iwl_rx_cmd_buffer *rxb); |
| 1385 | void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, |
| 1386 | struct iwl_rx_cmd_buffer *rxb); |
| 1387 | void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, |
| 1388 | struct iwl_rx_cmd_buffer *rxb); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1389 | |
| 1390 | /* MVM PHY */ |
| 1391 | int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, |
| 1392 | struct cfg80211_chan_def *chandef, |
| 1393 | u8 chains_static, u8 chains_dynamic); |
| 1394 | int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, |
| 1395 | struct cfg80211_chan_def *chandef, |
| 1396 | u8 chains_static, u8 chains_dynamic); |
Ilan Peer | fe0f2de | 2013-03-21 10:23:52 +0200 | [diff] [blame] | 1397 | void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, |
| 1398 | struct iwl_mvm_phy_ctxt *ctxt); |
| 1399 | void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, |
| 1400 | struct iwl_mvm_phy_ctxt *ctxt); |
Arik Nemtsov | cf7b491 | 2014-05-15 11:44:40 +0300 | [diff] [blame] | 1401 | int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); |
Arik Nemtsov | 6ce73e6 | 2014-09-11 13:00:19 +0300 | [diff] [blame] | 1402 | u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); |
| 1403 | u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1404 | |
| 1405 | /* MAC (virtual interface) programming */ |
| 1406 | int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
| 1407 | void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
| 1408 | int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
Luciano Coelho | bca49d9 | 2014-05-13 17:33:38 +0300 | [diff] [blame] | 1409 | int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
Johannes Berg | 3dfd3a9 | 2014-08-11 21:37:30 +0200 | [diff] [blame] | 1410 | bool force_assoc_off, const u8 *bssid_override); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1411 | int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
Arik Nemtsov | d92b732e | 2014-09-21 19:00:42 +0300 | [diff] [blame] | 1412 | u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1413 | int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, |
| 1414 | struct ieee80211_vif *vif); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1415 | void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, |
| 1416 | struct iwl_rx_cmd_buffer *rxb); |
| 1417 | void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, |
| 1418 | struct iwl_rx_cmd_buffer *rxb); |
Sara Sharon | 0db056d | 2015-12-29 11:07:15 +0200 | [diff] [blame] | 1419 | void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, |
| 1420 | struct iwl_rx_cmd_buffer *rxb); |
Sara Sharon | f92659a | 2016-02-03 15:04:49 +0200 | [diff] [blame] | 1421 | void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, |
| 1422 | struct iwl_rx_cmd_buffer *rxb); |
Sara Sharon | 3af512d6 | 2015-07-22 11:38:40 +0300 | [diff] [blame] | 1423 | void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, |
| 1424 | struct iwl_rx_cmd_buffer *rxb); |
Ilan Peer | 6e97b0d | 2013-12-23 22:18:02 +0200 | [diff] [blame] | 1425 | void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, |
| 1426 | struct ieee80211_vif *vif); |
Arik Nemtsov | a74346d | 2014-09-21 19:07:30 +0300 | [diff] [blame] | 1427 | unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, |
| 1428 | struct ieee80211_vif *exclude_vif); |
Andrei Otcheretianski | d3a108a | 2016-02-28 17:12:21 +0200 | [diff] [blame] | 1429 | void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, |
| 1430 | struct iwl_rx_cmd_buffer *rxb); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1431 | /* Bindings */ |
| 1432 | int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
| 1433 | int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
| 1434 | |
| 1435 | /* Quota management */ |
Emmanuel Grumbach | 7754ae7 | 2015-02-26 15:14:35 +0200 | [diff] [blame] | 1436 | int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, |
Johannes Berg | 0166230 | 2014-06-06 15:18:45 +0200 | [diff] [blame] | 1437 | struct ieee80211_vif *disabled_vif); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1438 | |
| 1439 | /* Scanning */ |
Luciano Coelho | 6749dd8 | 2015-03-20 15:51:36 +0200 | [diff] [blame] | 1440 | int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
| 1441 | struct cfg80211_scan_request *req, |
| 1442 | struct ieee80211_scan_ies *ies); |
David Spinadel | d249622 | 2014-05-20 12:46:37 +0300 | [diff] [blame] | 1443 | int iwl_mvm_scan_size(struct iwl_mvm *mvm); |
Luciano Coelho | c7d4248 | 2015-05-07 16:00:26 +0300 | [diff] [blame] | 1444 | int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); |
Luciano Coelho | 999d256 | 2015-03-27 10:28:26 +0300 | [diff] [blame] | 1445 | int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); |
David Spinadel | 4ffb365 | 2015-03-10 10:06:02 +0200 | [diff] [blame] | 1446 | void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); |
Luca Coelho | 69e0464 | 2016-05-03 12:18:33 +0300 | [diff] [blame] | 1447 | void iwl_mvm_scan_timeout_wk(struct work_struct *work); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1448 | |
David Spinadel | 35a000b | 2013-08-28 09:29:43 +0300 | [diff] [blame] | 1449 | /* Scheduled scan */ |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1450 | void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, |
| 1451 | struct iwl_rx_cmd_buffer *rxb); |
| 1452 | void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, |
| 1453 | struct iwl_rx_cmd_buffer *rxb); |
Luciano Coelho | 65ff556 | 2015-03-20 13:35:47 +0200 | [diff] [blame] | 1454 | int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, |
| 1455 | struct ieee80211_vif *vif, |
| 1456 | struct cfg80211_sched_scan_request *req, |
Luciano Coelho | 19945df | 2015-03-20 16:11:28 +0200 | [diff] [blame] | 1457 | struct ieee80211_scan_ies *ies, |
| 1458 | int type); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1459 | void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, |
| 1460 | struct iwl_rx_cmd_buffer *rxb); |
David Spinadel | fb98be5 | 2014-05-04 12:51:10 +0300 | [diff] [blame] | 1461 | |
David Spinadel | d249622 | 2014-05-20 12:46:37 +0300 | [diff] [blame] | 1462 | /* UMAC scan */ |
| 1463 | int iwl_mvm_config_scan(struct iwl_mvm *mvm); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1464 | void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, |
| 1465 | struct iwl_rx_cmd_buffer *rxb); |
| 1466 | void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, |
| 1467 | struct iwl_rx_cmd_buffer *rxb); |
David Spinadel | d249622 | 2014-05-20 12:46:37 +0300 | [diff] [blame] | 1468 | |
Matti Gottlieb | 905e36a | 2016-02-14 17:05:39 +0200 | [diff] [blame] | 1469 | /* Paging */ |
| 1470 | void iwl_free_fw_paging(struct iwl_mvm *mvm); |
| 1471 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1472 | /* MVM debugfs */ |
| 1473 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
| 1474 | int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); |
Johannes Berg | 6349437 | 2013-03-26 10:47:53 +0100 | [diff] [blame] | 1475 | void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
| 1476 | void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1477 | #else |
/* Debugfs disabled: registering is a successful no-op */
static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
					 struct dentry *dbgfs_dir)
{
	return 0;
}
Johannes Berg | 6349437 | 2013-03-26 10:47:53 +0100 | [diff] [blame] | 1483 | static inline void |
| 1484 | iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) |
| 1485 | { |
| 1486 | } |
/* Debugfs disabled: per-vif cleanup is a no-op */
static inline void
iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1491 | #endif /* CONFIG_IWLWIFI_DEBUGFS */ |
| 1492 | |
| 1493 | /* rate scaling */ |
Eyal Shapira | 9e68094 | 2013-11-09 00:16:16 +0200 | [diff] [blame] | 1494 | int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); |
Johannes Berg | 2f15a82 | 2015-01-21 18:05:04 +0100 | [diff] [blame] | 1495 | void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); |
Eyal Shapira | 5fc0f76 | 2014-01-28 01:35:32 +0200 | [diff] [blame] | 1496 | int rs_pretty_print_rate(char *buf, const u32 rate); |
Eyal Shapira | 361dbec | 2014-08-13 00:31:13 +0300 | [diff] [blame] | 1497 | void rs_update_last_rssi(struct iwl_mvm *mvm, |
| 1498 | struct iwl_lq_sta *lq_sta, |
| 1499 | struct ieee80211_rx_status *rx_status); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1500 | |
Emmanuel Grumbach | c1cb92f | 2014-01-28 10:17:18 +0200 | [diff] [blame] | 1501 | /* power management */ |
Emmanuel Grumbach | c1cb92f | 2014-01-28 10:17:18 +0200 | [diff] [blame] | 1502 | int iwl_mvm_power_update_device(struct iwl_mvm *mvm); |
Arik Nemtsov | 999609f | 2014-05-15 17:31:51 +0300 | [diff] [blame] | 1503 | int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); |
Luciano Coelho | ef9203d | 2014-08-08 19:18:35 +0300 | [diff] [blame] | 1504 | int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); |
Emmanuel Grumbach | c1cb92f | 2014-01-28 10:17:18 +0200 | [diff] [blame] | 1505 | int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
| 1506 | char *buf, int bufsz); |
Alexander Bondar | 1c2abf7 | 2013-08-27 20:31:48 +0300 | [diff] [blame] | 1507 | |
Alexander Bondar | 175a70b | 2013-04-14 20:59:37 +0300 | [diff] [blame] | 1508 | void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1509 | void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, |
| 1510 | struct iwl_rx_cmd_buffer *rxb); |
Alexander Bondar | 175a70b | 2013-04-14 20:59:37 +0300 | [diff] [blame] | 1511 | |
Johannes Berg | c43e933 | 2014-04-24 16:31:08 +0200 | [diff] [blame] | 1512 | #ifdef CONFIG_IWLWIFI_LEDS |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1513 | int iwl_mvm_leds_init(struct iwl_mvm *mvm); |
| 1514 | void iwl_mvm_leds_exit(struct iwl_mvm *mvm); |
Johannes Berg | c43e933 | 2014-04-24 16:31:08 +0200 | [diff] [blame] | 1515 | #else |
/* Stub used when CONFIG_IWLWIFI_LEDS is not set: report success, do nothing. */
static inline int
iwl_mvm_leds_init(struct iwl_mvm *mvm)
{
	return 0;
}
/* Stub: nothing to tear down when LED support is compiled out. */
static inline void
iwl_mvm_leds_exit(struct iwl_mvm *mvm)
{
}
| 1523 | #endif |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1524 | |
| 1525 | /* D3 (WoWLAN, NetDetect) */ |
| 1526 | int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); |
| 1527 | int iwl_mvm_resume(struct ieee80211_hw *hw); |
| 1528 | void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled); |
| 1529 | void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, |
| 1530 | struct ieee80211_vif *vif, |
| 1531 | struct cfg80211_gtk_rekey_data *data); |
| 1532 | void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, |
| 1533 | struct ieee80211_vif *vif, |
| 1534 | struct inet6_dev *idev); |
| 1535 | void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, |
| 1536 | struct ieee80211_vif *vif, int idx); |
Johannes Berg | debff61 | 2013-05-14 13:53:45 +0200 | [diff] [blame] | 1537 | extern const struct file_operations iwl_dbgfs_d3_test_ops; |
Eliad Peller | a3f7ba5 | 2015-11-11 17:23:59 +0200 | [diff] [blame] | 1538 | #ifdef CONFIG_PM |
| 1539 | int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, |
| 1540 | struct ieee80211_vif *vif, |
| 1541 | bool host_awake, |
| 1542 | u32 cmd_flags); |
| 1543 | void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm, |
| 1544 | struct ieee80211_vif *vif, |
| 1545 | struct iwl_wowlan_status *status); |
Johannes Berg | 6d9d32b | 2013-08-06 18:58:56 +0200 | [diff] [blame] | 1546 | void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, |
| 1547 | struct ieee80211_vif *vif); |
| 1548 | #else |
Eliad Peller | a3f7ba5 | 2015-11-11 17:23:59 +0200 | [diff] [blame] | 1549 | static inline int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, |
| 1550 | struct ieee80211_vif *vif, |
| 1551 | bool host_awake, |
| 1552 | u32 cmd_flags) |
| 1553 | { |
| 1554 | return 0; |
| 1555 | } |
| 1556 | |
/* Stub: no key state to refresh on D0i3 exit when CONFIG_PM is not set. */
static inline void
iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct iwl_wowlan_status *status)
{
}
| 1562 | |
Johannes Berg | 6d9d32b | 2013-08-06 18:58:56 +0200 | [diff] [blame] | 1563 | static inline void |
| 1564 | iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) |
| 1565 | { |
| 1566 | } |
| 1567 | #endif |
Eliad Peller | 1a95c8d | 2013-11-21 19:19:52 +0200 | [diff] [blame] | 1568 | void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, |
Emmanuel Grumbach | c8b06a9 | 2014-11-24 09:06:57 +0200 | [diff] [blame] | 1569 | struct iwl_wowlan_config_cmd *cmd); |
Eliad Peller | 8bd22e7 | 2013-11-03 19:48:50 +0200 | [diff] [blame] | 1570 | int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, |
| 1571 | struct ieee80211_vif *vif, |
| 1572 | bool disable_offloading, |
Sara Sharon | c97dab4 | 2015-11-19 11:53:49 +0200 | [diff] [blame] | 1573 | bool offload_ns, |
Eliad Peller | 8bd22e7 | 2013-11-03 19:48:50 +0200 | [diff] [blame] | 1574 | u32 cmd_flags); |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1575 | |
Eliad Peller | 7498cf4 | 2014-01-16 17:10:44 +0200 | [diff] [blame] | 1576 | /* D0i3 */ |
| 1577 | void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); |
| 1578 | void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); |
Eliad Peller | 576eeee | 2014-07-01 18:38:38 +0300 | [diff] [blame] | 1579 | int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); |
Eliad Peller | f4cf868 | 2014-11-04 16:57:06 +0200 | [diff] [blame] | 1580 | bool iwl_mvm_ref_taken(struct iwl_mvm *mvm); |
Arik Nemtsov | b249250 | 2014-03-13 12:21:50 +0200 | [diff] [blame] | 1581 | void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); |
Eliad Peller | 6735943 | 2014-12-09 15:23:54 +0200 | [diff] [blame] | 1582 | int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode); |
| 1583 | int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode); |
Eliad Peller | d15a747 | 2014-03-27 18:53:12 +0200 | [diff] [blame] | 1584 | int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); |
Eliad Peller | 7498cf4 | 2014-01-16 17:10:44 +0200 | [diff] [blame] | 1585 | |
Emmanuel Grumbach | 931d416 | 2013-01-17 09:42:25 +0200 | [diff] [blame] | 1586 | /* BT Coex */ |
Emmanuel Grumbach | 931d416 | 2013-01-17 09:42:25 +0200 | [diff] [blame] | 1587 | int iwl_send_bt_init_conf(struct iwl_mvm *mvm); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1588 | void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, |
| 1589 | struct iwl_rx_cmd_buffer *rxb); |
Emmanuel Grumbach | 2b76ef1 | 2013-01-24 10:35:13 +0200 | [diff] [blame] | 1590 | void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
Emmanuel Grumbach | a818292 | 2015-03-16 23:23:34 +0200 | [diff] [blame] | 1591 | enum ieee80211_rssi_event_data); |
Emmanuel Grumbach | 8e484f0 | 2013-10-02 15:02:25 +0300 | [diff] [blame] | 1592 | void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); |
Emmanuel Grumbach | 5b7ff61 | 2014-03-11 19:27:45 +0200 | [diff] [blame] | 1593 | u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, |
| 1594 | struct ieee80211_sta *sta); |
Emmanuel Grumbach | ffa6c70 | 2013-10-06 11:41:20 +0300 | [diff] [blame] | 1595 | bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, |
| 1596 | struct ieee80211_sta *sta); |
Emmanuel Grumbach | 219fb66 | 2014-10-30 11:59:40 +0200 | [diff] [blame] | 1597 | bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); |
Emmanuel Grumbach | 34c8b24 | 2014-05-28 21:53:39 +0300 | [diff] [blame] | 1598 | bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); |
Eliad Peller | 2fd647f | 2014-03-13 17:21:36 +0200 | [diff] [blame] | 1599 | bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, |
Johannes Berg | 57fbcce | 2016-04-12 15:56:15 +0200 | [diff] [blame] | 1600 | enum nl80211_band band); |
Emmanuel Grumbach | ee7bea5 | 2014-03-06 10:30:49 +0200 | [diff] [blame] | 1601 | u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, |
Emmanuel Grumbach | b797e3f | 2014-03-06 14:49:36 +0200 | [diff] [blame] | 1602 | struct ieee80211_tx_info *info, u8 ac); |
Emmanuel Grumbach | ffa6c70 | 2013-10-06 11:41:20 +0300 | [diff] [blame] | 1603 | |
Hila Gonen | 7df15b1 | 2012-12-12 11:16:19 +0200 | [diff] [blame] | 1604 | /* beacon filtering */ |
Alexander Bondar | b571a69 | 2013-05-21 14:49:09 +0300 | [diff] [blame] | 1605 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
| 1606 | void |
| 1607 | iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, |
| 1608 | struct iwl_beacon_filter_cmd *cmd); |
Alexander Bondar | b571a69 | 2013-05-21 14:49:09 +0300 | [diff] [blame] | 1609 | #else |
/* Stub: no debugfs overrides of the beacon filter command without
 * CONFIG_IWLWIFI_DEBUGFS; the command is left untouched.
 */
static inline void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
					 struct iwl_beacon_filter_cmd *cmd)
{
}
Alexander Bondar | b571a69 | 2013-05-21 14:49:09 +0300 | [diff] [blame] | 1614 | #endif |
Eliad Peller | 3dd37d0 | 2014-01-07 14:00:24 +0200 | [diff] [blame] | 1615 | int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, |
| 1616 | struct ieee80211_vif *vif, |
| 1617 | bool enable, u32 flags); |
Hila Gonen | 7df15b1 | 2012-12-12 11:16:19 +0200 | [diff] [blame] | 1618 | int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, |
Eliad Peller | 3dd37d0 | 2014-01-07 14:00:24 +0200 | [diff] [blame] | 1619 | struct ieee80211_vif *vif, |
| 1620 | u32 flags); |
Hila Gonen | 7df15b1 | 2012-12-12 11:16:19 +0200 | [diff] [blame] | 1621 | int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, |
Eliad Peller | 3dd37d0 | 2014-01-07 14:00:24 +0200 | [diff] [blame] | 1622 | struct ieee80211_vif *vif, |
| 1623 | u32 flags); |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1624 | /* SMPS */ |
| 1625 | void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
| 1626 | enum iwl_mvm_smps_type_request req_type, |
| 1627 | enum ieee80211_smps_mode smps_request); |
Emmanuel Grumbach | 5c90422 | 2014-05-18 09:16:45 +0300 | [diff] [blame] | 1628 | bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm); |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1629 | |
Johannes Berg | a21d7bc | 2013-11-12 17:30:52 +0100 | [diff] [blame] | 1630 | /* Low latency */ |
| 1631 | int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
| 1632 | bool value); |
Alexander Bondar | 50df8a3 | 2014-03-12 20:30:51 +0200 | [diff] [blame] | 1633 | /* get SystemLowLatencyMode - only needed for beacon threshold? */ |
| 1634 | bool iwl_mvm_low_latency(struct iwl_mvm *mvm); |
Johannes Berg | a21d7bc | 2013-11-12 17:30:52 +0100 | [diff] [blame] | 1635 | /* get VMACLowLatencyMode */ |
| 1636 | static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) |
| 1637 | { |
| 1638 | /* |
| 1639 | * should this consider associated/active/... state? |
| 1640 | * |
| 1641 | * Normally low-latency should only be active on interfaces |
| 1642 | * that are active, but at least with debugfs it can also be |
| 1643 | * enabled on interfaces that aren't active. However, when |
| 1644 | * interface aren't active then they aren't added into the |
| 1645 | * binding, so this has no real impact. For now, just return |
| 1646 | * the current desired low-latency state. |
| 1647 | */ |
Johannes Berg | b525d08 | 2016-01-06 10:01:41 +0100 | [diff] [blame] | 1648 | return mvmvif->low_latency_dbgfs || |
| 1649 | mvmvif->low_latency_traffic || |
| 1650 | mvmvif->low_latency_vcmd; |
Johannes Berg | a21d7bc | 2013-11-12 17:30:52 +0100 | [diff] [blame] | 1651 | } |
| 1652 | |
Avri Altman | 3edf8ff | 2014-07-30 11:41:01 +0300 | [diff] [blame] | 1653 | /* hw scheduler queue config */ |
Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1654 | void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, |
| 1655 | u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, |
Emmanuel Grumbach | 4cf677f | 2015-01-12 14:38:29 +0200 | [diff] [blame] | 1656 | unsigned int wdg_timeout); |
Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1657 | /* |
| 1658 | * Disable a TXQ. |
| 1659 | * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. |
| 1660 | */ |
| 1661 | void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, |
| 1662 | u8 tid, u8 flags); |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 1663 | int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); |
Avri Altman | 3edf8ff | 2014-07-30 11:41:01 +0300 | [diff] [blame] | 1664 | |
Luca Coelho | eb3908d | 2015-10-02 18:13:10 +0300 | [diff] [blame] | 1665 | /* Return a bitmask with all the hw supported queues, except for the |
| 1666 | * command queue, which can't be flushed. |
| 1667 | */ |
| 1668 | static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) |
| 1669 | { |
| 1670 | return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & |
| 1671 | ~BIT(IWL_MVM_CMD_QUEUE)); |
| 1672 | } |
| 1673 | |
Emmanuel Grumbach | 4cf677f | 2015-01-12 14:38:29 +0200 | [diff] [blame] | 1674 | static inline |
Liad Kaufman | 4ecafae | 2015-07-14 13:36:18 +0300 | [diff] [blame] | 1675 | void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, |
Liad Kaufman | 5c1156e | 2015-07-22 17:59:53 +0300 | [diff] [blame] | 1676 | u8 fifo, u16 ssn, unsigned int wdg_timeout) |
Avri Altman | 3edf8ff | 2014-07-30 11:41:01 +0300 | [diff] [blame] | 1677 | { |
| 1678 | struct iwl_trans_txq_scd_cfg cfg = { |
| 1679 | .fifo = fifo, |
| 1680 | .tid = IWL_MAX_TID_COUNT, |
| 1681 | .aggregate = false, |
| 1682 | .frame_limit = IWL_FRAME_LIMIT, |
| 1683 | }; |
| 1684 | |
Liad Kaufman | 5c1156e | 2015-07-22 17:59:53 +0300 | [diff] [blame] | 1685 | iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); |
Avri Altman | 3edf8ff | 2014-07-30 11:41:01 +0300 | [diff] [blame] | 1686 | } |
| 1687 | |
Chaya Rachel Ivgi | fcb6b92 | 2016-02-22 10:21:41 +0200 | [diff] [blame] | 1688 | static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) |
| 1689 | { |
| 1690 | mvm->ucode_loaded = false; |
| 1691 | iwl_trans_stop_device(mvm->trans); |
| 1692 | } |
| 1693 | |
Liad Kaufman | b4f7a9d | 2016-02-03 11:05:41 +0200 | [diff] [blame] | 1694 | /* Stop/start all mac queues in a given bitmap */ |
| 1695 | void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq); |
| 1696 | void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq); |
| 1697 | |
Liad Kaufman | cf961e1 | 2015-08-13 19:16:08 +0300 | [diff] [blame] | 1698 | /* Re-configure the SCD for a queue that has already been configured */ |
| 1699 | int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, |
| 1700 | int tid, int frame_limit, u16 ssn); |
| 1701 | |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1702 | /* Thermal management and CT-kill */ |
Ido Yariv | 0c0e2c7 | 2014-01-16 21:12:02 -0500 | [diff] [blame] | 1703 | void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); |
Luciano Coelho | fd1f755 | 2014-11-04 16:17:46 +0200 | [diff] [blame] | 1704 | void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1705 | void iwl_mvm_temp_notif(struct iwl_mvm *mvm, |
| 1706 | struct iwl_rx_cmd_buffer *rxb); |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1707 | void iwl_mvm_tt_handler(struct iwl_mvm *mvm); |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 1708 | void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff); |
| 1709 | void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1710 | void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); |
Chaya Rachel Ivgi | 7869318 | 2015-12-27 13:45:42 +0200 | [diff] [blame] | 1711 | int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); |
Chaya Rachel Ivgi | 0a3b711 | 2015-12-16 16:34:55 +0200 | [diff] [blame] | 1712 | void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
Chaya Rachel Ivgi | c221daf | 2015-12-29 09:54:49 +0200 | [diff] [blame] | 1713 | int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); |
Chaya Rachel Ivgi | 5c89e7b | 2016-01-05 10:34:47 +0200 | [diff] [blame] | 1714 | int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); |
Eytan Lifshitz | 9ee718a | 2013-05-19 19:14:41 +0300 | [diff] [blame] | 1715 | |
Arik Nemtsov | dcaf9f5 | 2014-03-04 19:54:12 +0200 | [diff] [blame] | 1716 | /* Location Aware Regulatory */ |
| 1717 | struct iwl_mcc_update_resp * |
Eran Harary | 8ba2d7a | 2015-02-08 11:41:43 +0200 | [diff] [blame] | 1718 | iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, |
| 1719 | enum iwl_mcc_source src_id); |
Arik Nemtsov | 90d4f7d | 2014-03-04 19:58:46 +0200 | [diff] [blame] | 1720 | int iwl_mvm_init_mcc(struct iwl_mvm *mvm); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1721 | void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, |
| 1722 | struct iwl_rx_cmd_buffer *rxb); |
Arik Nemtsov | 88931cc | 2014-03-05 12:26:15 +0200 | [diff] [blame] | 1723 | struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, |
Eran Harary | 8ba2d7a | 2015-02-08 11:41:43 +0200 | [diff] [blame] | 1724 | const char *alpha2, |
Jonathan Doron | 47c8b15 | 2014-11-27 16:55:25 +0200 | [diff] [blame] | 1725 | enum iwl_mcc_source src_id, |
| 1726 | bool *changed); |
| 1727 | struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, |
| 1728 | bool *changed); |
Eran Harary | 8ba2d7a | 2015-02-08 11:41:43 +0200 | [diff] [blame] | 1729 | int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); |
Jonathan Doron | 47c8b15 | 2014-11-27 16:55:25 +0200 | [diff] [blame] | 1730 | void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); |
Arik Nemtsov | dcaf9f5 | 2014-03-04 19:54:12 +0200 | [diff] [blame] | 1731 | |
Lilach Edelstein | 1f3b0ff | 2013-10-06 13:03:32 +0200 | [diff] [blame] | 1732 | /* smart fifo */ |
| 1733 | int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
| 1734 | bool added_vif); |
| 1735 | |
Arik Nemtsov | fa3d07e | 2014-05-15 18:59:32 +0300 | [diff] [blame] | 1736 | /* TDLS */ |
Arik Nemtsov | 307e472 | 2014-09-15 18:48:59 +0300 | [diff] [blame] | 1737 | |
| 1738 | /* |
| 1739 | * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present. |
| 1740 | * This TID is marked as used vs the AP and all connected TDLS peers. |
| 1741 | */ |
| 1742 | #define IWL_MVM_TDLS_FW_TID 4 |
| 1743 | |
Arik Nemtsov | fa3d07e | 2014-05-15 18:59:32 +0300 | [diff] [blame] | 1744 | int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); |
Arik Nemtsov | d431725 | 2014-09-07 19:18:31 +0300 | [diff] [blame] | 1745 | void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); |
| 1746 | void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
| 1747 | bool sta_added); |
| 1748 | void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, |
| 1749 | struct ieee80211_vif *vif); |
Arik Nemtsov | 1d3c3f6 | 2014-10-23 18:03:10 +0300 | [diff] [blame] | 1750 | int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, |
| 1751 | struct ieee80211_vif *vif, |
| 1752 | struct ieee80211_sta *sta, u8 oper_class, |
| 1753 | struct cfg80211_chan_def *chandef, |
| 1754 | struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); |
| 1755 | void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, |
| 1756 | struct ieee80211_vif *vif, |
| 1757 | struct ieee80211_tdls_ch_sw_params *params); |
| 1758 | void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, |
| 1759 | struct ieee80211_vif *vif, |
| 1760 | struct ieee80211_sta *sta); |
Johannes Berg | 0416841 | 2015-06-23 21:22:09 +0200 | [diff] [blame] | 1761 | void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
Arik Nemtsov | 1d3c3f6 | 2014-10-23 18:03:10 +0300 | [diff] [blame] | 1762 | void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); |
Arik Nemtsov | fa3d07e | 2014-05-15 18:59:32 +0300 | [diff] [blame] | 1763 | |
Sara Sharon | d0ff5d2 | 2016-03-23 16:31:43 +0200 | [diff] [blame] | 1764 | void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, |
| 1765 | struct iwl_mvm_internal_rxq_notif *notif, |
| 1766 | u32 size); |
Sara Sharon | 0690405 | 2016-02-28 20:28:17 +0200 | [diff] [blame] | 1767 | void iwl_mvm_reorder_timer_expired(unsigned long data); |
Luciano Coelho | 7f549e2 | 2014-10-02 15:38:04 +0300 | [diff] [blame] | 1768 | struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); |
| 1769 | |
Liad Kaufman | 9794c64 | 2015-08-19 17:34:28 +0300 | [diff] [blame] | 1770 | void iwl_mvm_inactivity_check(struct iwl_mvm *mvm); |
| 1771 | |
Luciano Coelho | b08c1d9 | 2014-05-20 23:31:05 +0300 | [diff] [blame] | 1772 | void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); |
Emmanuel Grumbach | 5d42e7b | 2015-03-19 20:04:51 +0200 | [diff] [blame] | 1773 | unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, |
| 1774 | struct ieee80211_vif *vif, |
| 1775 | bool tdls, bool cmd_q); |
Emmanuel Grumbach | 3175520 | 2015-03-30 10:55:57 +0300 | [diff] [blame] | 1776 | void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
| 1777 | const char *errmsg); |
Emmanuel Grumbach | 8c23f95 | 2014-12-04 10:07:47 +0200 | [diff] [blame] | 1778 | |
Aviya Erenfeld | 0309826 | 2016-02-18 14:09:33 +0200 | [diff] [blame] | 1779 | /* Link Quality Measurement */ |
| 1780 | int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, |
| 1781 | enum iwl_lqm_cmd_operatrions operation, |
| 1782 | u32 duration, u32 timeout); |
| 1783 | bool iwl_mvm_lqm_active(struct iwl_mvm *mvm); |
| 1784 | |
Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 1785 | #endif /* __IWL_MVM_H__ */ |