/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#ifndef __IWL_MVM_H__
#define __IWL_MVM_H__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/leds.h>
#include <linux/in6.h>

#include "iwl-op-mode.h"
#include "iwl-trans.h"
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
#include "sta.h"
#include "fw-api.h"
#include "constants.h"

#define IWL_INVALID_MAC80211_QUEUE	0xff
#define IWL_MVM_MAX_ADDRESSES		5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8

enum iwl_mvm_tx_fifo {
	IWL_MVM_TX_FIFO_BK = 0,
	IWL_MVM_TX_FIFO_BE,
	IWL_MVM_TX_FIFO_VI,
	IWL_MVM_TX_FIFO_VO,
	IWL_MVM_TX_FIFO_MCAST = 5,
};

extern const struct ieee80211_ops iwl_mvm_hw_ops;

Johannes Berg | 8ca151b | 2013-01-24 14:25:36 +0100 | [diff] [blame] | 96 | /** |
| 97 | * struct iwl_mvm_mod_params - module parameters for iwlmvm |
| 98 | * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. |
| 99 | * We will register to mac80211 to have testmode working. The NIC must not |
| 100 | * be up'ed after the INIT fw asserted. This is useful to be able to use |
| 101 | * proprietary tools over testmode to debug the INIT fw. |
| 102 | * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power |
| 103 | * Save)-2(default), LP(Low Power)-3 |
| 104 | */ |
| 105 | struct iwl_mvm_mod_params { |
| 106 | bool init_dbg; |
| 107 | int power_scheme; |
| 108 | }; |
| 109 | extern struct iwl_mvm_mod_params iwlmvm_mod_params; |
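
/*
 * Usage sketch (illustrative, not part of the original header): these values
 * are filled from module parameters, so a power scheme can typically be
 * selected at load time with something like:
 *
 *	modprobe iwlmvm power_scheme=1
 *
 * The exact parameter names are defined where the module registers them
 * (ops.c in this driver); treat the command above as an assumption rather
 * than a guaranteed interface.
 */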

struct iwl_mvm_phy_ctxt {
	u16 id;
	u16 color;
	u32 ref;

	/*
	 * TODO: This should probably be removed. Currently here only for rate
	 * scaling algorithm
	 */
	struct ieee80211_channel *channel;
};

struct iwl_mvm_time_event_data {
	struct ieee80211_vif *vif;
	struct list_head list;
	unsigned long end_jiffies;
	u32 duration;
	bool running;
	u32 uid;

	/*
	 * The 'id' field must only be accessed while mvm->time_event_lock is
	 * held, as its value is used to indicate whether the time event is in
	 * the time event list or not (when id == TE_MAX).
	 */
	u32 id;
};

/* Power management */

/**
 * enum iwl_power_scheme
 * @IWL_POWER_SCHEME_CAM: Continuously Active Mode
 * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
 * @IWL_POWER_SCHEME_LP: Low Power
 */
enum iwl_power_scheme {
	IWL_POWER_SCHEME_CAM = 1,
	IWL_POWER_SCHEME_BPS,
	IWL_POWER_SCHEME_LP
};

#define IWL_CONN_MAX_LISTEN_INTERVAL	10
#define IWL_UAPSD_AC_INFO		(IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
					 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
					 IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
					 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
#define IWL_UAPSD_MAX_SP		IEEE80211_WMM_IE_STA_QOSINFO_SP_2

#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
	MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
	MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
	MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
	MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
	MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
	MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
	MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
	MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
	MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
};

struct iwl_dbgfs_pm {
	u16 keep_alive_seconds;
	u32 rx_data_timeout;
	u32 tx_data_timeout;
	bool skip_over_dtim;
	u8 skip_dtim_periods;
	bool lprx_ena;
	u32 lprx_rssi_threshold;
	bool snooze_ena;
	bool uapsd_misbehaving;
	int mask;
};

/* beacon filtering */

enum iwl_dbgfs_bf_mask {
	MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
	MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
	MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
	MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
	MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
	MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
	MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
	MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
	MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
	MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
	MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
};

struct iwl_dbgfs_bf {
	u32 bf_energy_delta;
	u32 bf_roaming_energy_delta;
	u32 bf_roaming_state;
	u32 bf_temp_threshold;
	u32 bf_temp_fast_filter;
	u32 bf_temp_slow_filter;
	u32 bf_enable_beacon_filter;
	u32 bf_debug_flag;
	u32 bf_escape_timer;
	u32 ba_escape_timer;
	u32 ba_enable_beacon_abort;
	int mask;
};
#endif

enum iwl_mvm_smps_type_request {
	IWL_MVM_SMPS_REQ_BT_COEX,
	IWL_MVM_SMPS_REQ_TT,
	NUM_IWL_MVM_SMPS_REQ,
};

enum iwl_mvm_ref_type {
	IWL_MVM_REF_UCODE_DOWN,
	IWL_MVM_REF_SCAN,
	IWL_MVM_REF_ROC,
	IWL_MVM_REF_P2P_CLIENT,
	IWL_MVM_REF_AP_IBSS,
	IWL_MVM_REF_USER,
	IWL_MVM_REF_TX,
	IWL_MVM_REF_TX_AGG,
	IWL_MVM_REF_ADD_IF,
	IWL_MVM_REF_EXIT_WORK,

	IWL_MVM_REF_COUNT,
};

enum iwl_bt_force_ant_mode {
	BT_FORCE_ANT_DIS = 0,
	BT_FORCE_ANT_AUTO,
	BT_FORCE_ANT_BT,
	BT_FORCE_ANT_WIFI,

	BT_FORCE_ANT_MAX,
};

/**
 * struct iwl_mvm_vif_bf_data - beacon filtering related data
 * @bf_enabled: indicates if beacon filtering is enabled
 * @ba_enabled: indicates if beacon abort is enabled
 * @last_beacon_signal: last beacon RSSI signal in dBm
 * @ave_beacon_signal: average beacon signal
 * @last_cqm_event: RSSI of the last CQM event
 * @bt_coex_min_thold: minimum threshold for BT coex
 * @bt_coex_max_thold: maximum threshold for BT coex
 * @last_bt_coex_event: RSSI of the last BT coex event
 */
struct iwl_mvm_vif_bf_data {
	bool bf_enabled;
	bool ba_enabled;
	s8 ave_beacon_signal;
	s8 last_cqm_event;
	s8 bt_coex_min_thold;
	s8 bt_coex_max_thold;
	s8 last_bt_coex_event;
};

/**
 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
 * @id: between 0 and 3
 * @color: to solve races upon MAC addition and removal
 * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
 * @uploaded: indicates the MAC context has been added to the device
 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
 *	should get quota etc.
 * @pm_enabled: indicates if MAC power management is allowed
 * @monitor_active: indicates that monitor context is configured, and that the
 *	interface should get quota etc.
 * @low_latency: indicates that this interface is in low-latency mode
 *	(VMACLowLatencyMode)
 * @queue_params: QoS params for this MAC
 * @bcast_sta: station used for broadcast packets. Used by the following
 *	vifs: P2P_DEVICE, GO and AP.
 * @beacon_skb: the skb used to hold the AP/GO beacon template
 * @smps_requests: the SMPS requests of different parts of the driver,
 *	combined on update to yield the overall request to mac80211.
 */
struct iwl_mvm_vif {
	u16 id;
	u16 color;
	u8 ap_sta_id;

	bool uploaded;
	bool ap_ibss_active;
	bool pm_enabled;
	bool monitor_active;
	bool low_latency;
	struct iwl_mvm_vif_bf_data bf_data;

	u32 ap_beacon_time;

	enum iwl_tsf_id tsf_id;

	/*
	 * QoS data from mac80211, need to store this here
	 * as mac80211 has a separate callback but we need
	 * to have the data for the MAC context
	 */
	struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
	struct iwl_mvm_time_event_data time_event_data;

	struct iwl_mvm_int_sta bcast_sta;

	/*
	 * Assigned while mac80211 has the interface in a channel context,
	 * or, for P2P Device, while it exists.
	 */
	struct iwl_mvm_phy_ctxt *phy_ctxt;

#ifdef CONFIG_PM_SLEEP
	/* WoWLAN GTK rekey data */
	struct {
		u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
		__le64 replay_ctr;
		bool valid;
	} rekey_data;

	int tx_key_idx;

	bool seqno_valid;
	u16 seqno;
#endif

#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 addresses for WoWLAN */
	struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
	int num_target_ipv6_addrs;
#endif

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct iwl_mvm *mvm;
	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_slink;
	struct iwl_dbgfs_pm dbgfs_pm;
	struct iwl_dbgfs_bf dbgfs_bf;
	struct iwl_mac_power_cmd mac_pwr_cmd;
#endif

	enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];

	/* FW identified misbehaving AP */
	u8 uapsd_misbehaving_bssid[ETH_ALEN];
};

static inline struct iwl_mvm_vif *
iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
{
	return (void *)vif->drv_priv;
}
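
/*
 * Usage sketch (illustrative, not part of the original header): mac80211 hands
 * the driver a struct ieee80211_vif whose drv_priv area holds the per-vif
 * state above, so driver callbacks typically start with:
 *
 *	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 */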

extern const u8 tid_to_mac80211_ac[];

enum iwl_scan_status {
	IWL_MVM_SCAN_NONE,
	IWL_MVM_SCAN_OS,
	IWL_MVM_SCAN_SCHED,
};

/**
 * struct iwl_nvm_section - describes an NVM section in memory.
 *
 * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
 * and saved for later use by the driver. Not all NVM sections are saved
 * this way, only the needed ones.
 */
struct iwl_nvm_section {
	u16 length;
	const u8 *data;
};

/**
 * struct iwl_tt_tx_backoff - Tx-backoff threshold
 * @temperature: The threshold in Celsius
 * @backoff: The tx-backoff in uSec
 */
struct iwl_tt_tx_backoff {
	s32 temperature;
	u32 backoff;
};

#define TT_TX_BACKOFF_SIZE 6

/**
 * struct iwl_tt_params - thermal throttling parameters
 * @ct_kill_entry: CT Kill entry threshold
 * @ct_kill_exit: CT Kill exit threshold
 * @ct_kill_duration: The time intervals (in uSec) in which the driver needs
 *	to check whether to exit CT Kill.
 * @dynamic_smps_entry: Dynamic SMPS entry threshold
 * @dynamic_smps_exit: Dynamic SMPS exit threshold
 * @tx_protection_entry: TX protection entry threshold
 * @tx_protection_exit: TX protection exit threshold
 * @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
 * @support_ct_kill: Support CT Kill?
 * @support_dynamic_smps: Support dynamic SMPS?
 * @support_tx_protection: Support tx protection?
 * @support_tx_backoff: Support tx-backoff?
 */
struct iwl_tt_params {
	s32 ct_kill_entry;
	s32 ct_kill_exit;
	u32 ct_kill_duration;
	s32 dynamic_smps_entry;
	s32 dynamic_smps_exit;
	s32 tx_protection_entry;
	s32 tx_protection_exit;
	struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
	bool support_ct_kill;
	bool support_dynamic_smps;
	bool support_tx_protection;
	bool support_tx_backoff;
};

/**
 * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
 * @ct_kill_exit: worker to exit thermal kill
 * @dynamic_smps: is dynamic SMPS currently enabled by thermal throttling?
 * @tx_backoff: the current thermal throttling tx backoff in uSec
 * @min_backoff: the minimal tx backoff due to power restrictions
 * @params: parameters to configure the thermal throttling algorithm
 * @throttle: is thermal throttling currently active?
 */
struct iwl_mvm_tt_mgmt {
	struct delayed_work ct_kill_exit;
	bool dynamic_smps;
	u32 tx_backoff;
	u32 min_backoff;
	const struct iwl_tt_params *params;
	bool throttle;
};

#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8

struct iwl_mvm_frame_stats {
	u32 legacy_frames;
	u32 ht_frames;
	u32 vht_frames;
	u32 bw_20_frames;
	u32 bw_40_frames;
	u32 bw_80_frames;
	u32 bw_160_frames;
	u32 sgi_frames;
	u32 ngi_frames;
	u32 siso_frames;
	u32 mimo2_frames;
	u32 agg_frames;
	u32 ampdu_count;
	u32 success_frames;
	u32 fail_frames;
	u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
	int last_frame_idx;
};

enum {
	D0I3_DEFER_WAKEUP,
	D0I3_PENDING_WAKEUP,
};

struct iwl_mvm {
	/* for logger access */
	struct device *dev;

	struct iwl_trans *trans;
	const struct iwl_fw *fw;
	const struct iwl_cfg *cfg;
	struct iwl_phy_db *phy_db;
	struct ieee80211_hw *hw;

	/* for protecting access to iwl_mvm */
	struct mutex mutex;
	struct list_head async_handlers_list;
	spinlock_t async_handlers_lock;
	struct work_struct async_handlers_wk;

	struct work_struct roc_done_wk;

	unsigned long status;

	/*
	 * for beacon filtering -
	 * currently only one interface can be supported
	 */
	struct iwl_mvm_vif *bf_allowed_vif;

	enum iwl_ucode_type cur_ucode;
	bool ucode_loaded;
	bool init_ucode_complete;
	u32 error_event_table;
	u32 log_event_table;
	u32 umac_error_event_table;
	bool support_umac_log;
	struct iwl_sf_region sf_space;

	u32 ampdu_ref;

	struct iwl_notif_wait_data notif_wait;

	struct mvm_statistics_rx rx_stats;

	unsigned long transport_queue_stop;
	u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
	atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];

	const char *nvm_file_name;
	struct iwl_nvm_data *nvm_data;
	/* NVM sections */
	struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];

	/* EEPROM MAC addresses */
	struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];

	/* data related to data path */
	struct iwl_rx_phy_info last_phy_info;
	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
	struct work_struct sta_drained_wk;
	unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
	atomic_t pending_frames[IWL_MVM_STATION_COUNT];
	u8 rx_ba_sessions;

	/* configured by mac80211 */
	u32 rts_threshold;

	/* Scan status, cmd (pre-allocated) and auxiliary station */
	enum iwl_scan_status scan_status;
	void *scan_cmd;
	struct iwl_mcast_filter_cmd *mcast_filter_cmd;

	/* rx chain antennas set through debugfs for the scan command */
	u8 scan_rx_ant;

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
	/* broadcast filters to configure for each associated station */
	const struct iwl_fw_bcast_filter *bcast_filters;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct {
		u32 override; /* u32 for debugfs_create_bool */
		struct iwl_bcast_filter_cmd cmd;
	} dbgfs_bcast_filtering;
#endif
#endif

	/* Internal station */
	struct iwl_mvm_int_sta aux_sta;

	bool last_ebs_successful;

	u8 scan_last_antenna_idx; /* to toggle TX between antennas */
	u8 mgmt_last_antenna_idx;

	/* last smart fifo state that was successfully sent to firmware */
	enum iwl_sf_state sf_state;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	u32 dbgfs_prph_reg_addr;
	bool disable_power_off;
	bool disable_power_off_d3;

	struct debugfs_blob_wrapper nvm_hw_blob;
	struct debugfs_blob_wrapper nvm_sw_blob;
	struct debugfs_blob_wrapper nvm_calib_blob;
	struct debugfs_blob_wrapper nvm_prod_blob;

	struct iwl_mvm_frame_stats drv_rx_stats;
	spinlock_t drv_stats_lock;
#endif

	struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];

	struct list_head time_event_list;
	spinlock_t time_event_lock;

	/*
	 * A bitmap indicating which key slots are in use. The firmware
	 * can hold 16 keys at most, and the bitmap size reflects that.
	 */
	unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];

	/* A bitmap of reference types taken by the driver. */
	unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];

	u8 vif_count;

	/* -1 for always, 0 for never, >0 for that many times */
	s8 restart_fw;
	void *fw_error_dump;

#ifdef CONFIG_IWLWIFI_LEDS
	struct led_classdev led;
#endif

	struct ieee80211_vif *p2p_device_vif;

#ifdef CONFIG_PM_SLEEP
	struct wiphy_wowlan_support wowlan;
	int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
	bool d3_test_active;
	bool store_d3_resume_sram;
	void *d3_resume_sram;
	u32 d3_test_pme_ptr;
	struct ieee80211_vif *keep_vif;
#endif
#endif

	/* d0i3 */
	u8 d0i3_ap_sta_id;
	bool d0i3_offloading;
	struct work_struct d0i3_exit_work;
	struct sk_buff_head d0i3_tx;
	/* protect d0i3_suspend_flags */
	struct mutex d0i3_suspend_mutex;
	unsigned long d0i3_suspend_flags;
	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
	spinlock_t d0i3_tx_lock;
	wait_queue_head_t d0i3_exit_waitq;

	/* BT-Coex */
	u8 bt_kill_msk;

	struct iwl_bt_coex_profile_notif_old last_bt_notif_old;
	struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old;
	struct iwl_bt_coex_profile_notif last_bt_notif;
	struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;

	u32 last_ant_isol;
	u8 last_corun_lut;
	u8 bt_tx_prio;
	enum iwl_bt_force_ant_mode bt_force_ant_mode;

	/* Thermal Throttling and CTkill */
	struct iwl_mvm_tt_mgmt thermal_throttle;
	s32 temperature;	/* Celsius */

#ifdef CONFIG_NL80211_TESTMODE
	u32 noa_duration;
	struct ieee80211_vif *noa_vif;
#endif

	/* Tx queues */
	u8 aux_queue;
	u8 first_agg_queue;
	u8 last_agg_queue;

	/* Indicates if device power save is allowed */
	bool ps_disabled;

	struct ieee80211_vif *csa_vif;

	/* system time of last beacon (for AP/GO interface) */
	u32 ap_last_beacon_gp2;
};

/* Extract MVM priv from op_mode and _hw */
#define IWL_OP_MODE_GET_MVM(_iwl_op_mode)		\
	((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)

#define IWL_MAC80211_GET_MVM(_hw)			\
	IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
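
/*
 * Usage sketch (illustrative, not part of the original header): mac80211
 * callbacks receive a struct ieee80211_hw and recover the op-mode private
 * data with:
 *
 *	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 */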

enum iwl_mvm_status {
	IWL_MVM_STATUS_HW_RFKILL,
	IWL_MVM_STATUS_HW_CTKILL,
	IWL_MVM_STATUS_ROC_RUNNING,
	IWL_MVM_STATUS_IN_HW_RESTART,
	IWL_MVM_STATUS_IN_D0I3,
};

static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
{
	return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
	       test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
}
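
/*
 * Usage sketch (illustrative, not part of the original header): paths that
 * must not touch the hardware while rfkill/CT-kill is asserted typically bail
 * out early, e.g.:
 *
 *	if (iwl_mvm_is_radio_killed(mvm))
 *		return -ERFKILL;
 */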

static inline struct iwl_mvm_sta *
iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;

	if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
		return NULL;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* This can happen if the station has just been removed */
	if (IS_ERR_OR_NULL(sta))
		return NULL;

	return iwl_mvm_sta_from_mac80211(sta);
}
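
/*
 * Usage sketch (illustrative, not part of the original header): the lookup
 * above relies on lockdep_is_held(&mvm->mutex), so callers are expected to
 * hold mvm->mutex, e.g.:
 *
 *	mutex_lock(&mvm->mutex);
 *	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
 *	if (mvmsta)
 *		... use mvmsta ...
 *	mutex_unlock(&mvm->mutex);
 */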

static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
{
	return mvm->trans->cfg->d0i3 &&
	       (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}

extern const u8 iwl_mvm_ac_to_tx_fifo[];

struct iwl_rate_info {
	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
	u8 plcp_siso;	/* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
	u8 plcp_mimo2;	/* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
	u8 plcp_mimo3;	/* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
	u8 ieee;	/* MAC header:  IWL_RATE_6M_IEEE, etc. */
};

/******************
 * MVM Methods
 ******************/
/* uCode */
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);

/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum ieee80211_band band);
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum ieee80211_band band,
			       struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);

/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
				  struct iwl_host_cmd *cmd);
int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
				      u32 flags, u16 len, const void *data);
int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
					 struct iwl_host_cmd *cmd,
					 u32 *status);
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
					     u16 len, const void *data,
					     u32 *status);
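
/*
 * Usage sketch (illustrative, not part of the original header): a host command
 * whose payload is a packed struct is typically sent with iwl_mvm_send_cmd_pdu
 * while holding mvm->mutex, e.g.:
 *
 *	struct iwl_some_cmd cmd = {};			// hypothetical struct
 *
 *	ret = iwl_mvm_send_cmd_pdu(mvm, SOME_CMD_ID,	// hypothetical cmd id
 *				   0, sizeof(cmd), &cmd);
 *
 * The command id, struct and flags value above are placeholders, not part of
 * the real firmware API.
 */
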
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
		   struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);

static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
{
	flush_work(&mvm->async_handlers_wk);
}

/* Statistics */
int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
				struct iwl_rx_cmd_buffer *rxb,
				struct iwl_device_cmd *cmd);
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
			  struct iwl_rx_cmd_buffer *rxb,
			  struct iwl_device_cmd *cmd);

/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);

int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);

int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
				    struct iwl_bcast_filter_cmd *cmd);

/*
 * FW notifications / CMD responses handlers
 * Convention: iwl_mvm_rx_<NAME OF THE CMD>
 */
int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			  struct iwl_device_cmd *cmd);
int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
		       struct iwl_device_cmd *cmd);
int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
		      struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			struct iwl_device_cmd *cmd);
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			 struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
				  struct iwl_rx_cmd_buffer *rxb,
				  struct iwl_device_cmd *cmd);
int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			struct iwl_device_cmd *cmd);
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				struct iwl_rx_cmd_buffer *rxb,
				struct iwl_device_cmd *cmd);

/* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
			 struct cfg80211_chan_def *chandef,
			 u8 chains_static, u8 chains_dynamic);
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
			     struct cfg80211_chan_def *chandef,
			     u8 chains_static, u8 chains_dynamic);
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
			  struct iwl_mvm_phy_ctxt *ctxt);
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
			    struct iwl_mvm_phy_ctxt *ctxt);

/* MAC (virtual interface) programming */
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     bool force_assoc_off);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif);
int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
			    struct iwl_rx_cmd_buffer *rxb,
			    struct iwl_device_cmd *cmd);
int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
				    struct iwl_rx_cmd_buffer *rxb,
				    struct iwl_device_cmd *cmd);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif);

/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);

/* Quota management */
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif);

/* Scanning */
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
			 struct ieee80211_vif *vif,
			 struct cfg80211_scan_request *req);
int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_device_cmd *cmd);
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);

/* Scheduled scan */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
					   struct iwl_rx_cmd_buffer *rxb,
					   struct iwl_device_cmd *cmd);
int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct cfg80211_sched_scan_request *req,
			      struct ieee80211_scan_ies *ies);
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
				       struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
			     struct cfg80211_sched_scan_request *req);
int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
				    struct iwl_rx_cmd_buffer *rxb,
				    struct iwl_device_cmd *cmd);

/* Unified scan */
int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct ieee80211_scan_request *req);
int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    struct cfg80211_sched_scan_request *req,
				    struct ieee80211_scan_ies *ies);

/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#else
static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
					 struct dentry *dbgfs_dir)
{
	return 0;
}
static inline void
iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
static inline void
iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
				struct iwl_mvm_frame_stats *stats,
				u32 rate, bool agg);
int rs_pretty_print_rate(char *buf, const u32 rate);

/* power management */
int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				 char *buf, int bufsz);

void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
					     struct iwl_rx_cmd_buffer *rxb,
					     struct iwl_device_cmd *cmd);

#ifdef CONFIG_IWLWIFI_LEDS
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
#else
static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
{
	return 0;
}
static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
{
}
#endif

/* D3 (WoWLAN, NetDetect) */
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
int iwl_mvm_resume(struct ieee80211_hw *hw);
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct cfg80211_gtk_rekey_data *data);
void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct inet6_dev *idev);
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
#ifdef CONFIG_PM_SLEEP
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif);
#else
static inline void
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
				struct iwl_wowlan_config_cmd_v2 *cmd);
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       bool disable_offloading,
			       u32 cmd_flags);

/* D0i3 */
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
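
/*
 * Usage sketch (illustrative, not part of the original header): code that must
 * keep the device out of D0i3 for the duration of an operation brackets the
 * work with a reference of the appropriate type, e.g.:
 *
 *	iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
 *	... do work that requires the device to stay awake ...
 *	iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
 */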

/* BT Coex */
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_device_cmd *cmd);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			   enum ieee80211_rssi_event rssi_event);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
				     struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
				    enum ieee80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info, u8 ac);

bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb,
				 struct iwl_device_cmd *cmd);
void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       enum ieee80211_rssi_event rssi_event);
u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
					 struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
					enum ieee80211_band band);
int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
				      struct iwl_rx_cmd_buffer *rxb,
				      struct iwl_device_cmd *cmd);

enum iwl_bt_kill_msk {
	BT_KILL_MSK_DEFAULT,
	BT_KILL_MSK_SCO_HID_A2DP,
	BT_KILL_MSK_REDUCED_TXPOW,
	BT_KILL_MSK_MAX,
};
extern const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX];
extern const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX];

/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
					 struct iwl_beacon_filter_cmd *cmd);
#else
static inline void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
					 struct iwl_beacon_filter_cmd *cmd)
{}
#endif
int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   bool enable, u32 flags);
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 u32 flags);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
				  struct ieee80211_vif *vif,
				  u32 flags);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request);
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);

/* Low latency */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool value);
/* get SystemLowLatencyMode - only needed for beacon threshold? */
bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
/* get VMACLowLatencyMode */
static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
{
	/*
	 * should this consider associated/active/... state?
	 *
	 * Normally low-latency should only be active on interfaces
	 * that are active, but at least with debugfs it can also be
	 * enabled on interfaces that aren't active. However, when
	 * interfaces aren't active they aren't added into the
	 * binding, so this has no real impact. For now, just return
	 * the current desired low-latency state.
	 */

	return mvmvif->low_latency;
}

/* Assoc status */
bool iwl_mvm_is_idle(struct iwl_mvm *mvm);

/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);

/* smart fifo */
int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      bool added_vif);

#endif /* __IWL_MVM_H__ */