/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/acpi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

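/*
 * The timeouts below are in jiffies; HZ is one second's worth of
 * jiffies, so the firmware gets one second to report ALIVE and two
 * seconds to finish the init-image calibrations.
 */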
#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
        bool valid;
        u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
        struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
                .valid = cpu_to_le32(valid_tx_ant),
        };

        IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
        return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
}
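
/*
 * Note on iwl_send_tx_ant_cfg() above: valid_tx_ant is a bitmask of
 * the usable TX chains (one bit per antenna), which the callers below
 * obtain via iwl_mvm_get_valid_tx_ant(). This reading is inferred from
 * the call sites here rather than from the antenna definitions.
 */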

static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
        int i;
        struct iwl_rss_config_cmd cmd = {
                .flags = cpu_to_le32(IWL_RSS_ENABLE),
                .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
                             IWL_RSS_HASH_TYPE_IPV4_UDP |
                             IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
                             IWL_RSS_HASH_TYPE_IPV6_TCP |
                             IWL_RSS_HASH_TYPE_IPV6_UDP |
                             IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
        };

        if (mvm->trans->num_rx_queues == 1)
                return 0;

        /* Do not direct RSS traffic to Q 0, which is our fallback queue */
        for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
                cmd.indirection_table[i] =
                        1 + (i % (mvm->trans->num_rx_queues - 1));
        netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

        return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
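
/*
 * Illustrative example (not in the original code): with
 * num_rx_queues == 3, the indirection table above becomes
 * 1, 2, 1, 2, ..., so hashed RX traffic alternates between queues 1
 * and 2 while queue 0 stays reserved as the fallback queue.
 */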

static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
        struct iwl_dqa_enable_cmd dqa_cmd = {
                .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
        };
        u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
        int ret;

        ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
        else
                IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

        return ret;
}
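
/*
 * DQA (dynamic queue allocation) mode lets TX queues be assigned to
 * stations/TIDs on demand rather than statically per interface; the
 * command above only tells the firmware which queue the driver uses
 * as its command queue (IWL_MVM_DQA_CMD_QUEUE).
 */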

void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
        int i;

        if (!mvm->fw_paging_db[0].fw_paging_block)
                return;

        for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
                struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];

                if (!paging->fw_paging_block) {
                        IWL_DEBUG_FW(mvm,
                                     "Paging: block %d already freed, continue to next page\n",
                                     i);

                        continue;
                }
                dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
                               paging->fw_paging_size, DMA_BIDIRECTIONAL);

                __free_pages(paging->fw_paging_block,
                             get_order(paging->fw_paging_size));
                paging->fw_paging_block = NULL;
        }
        kfree(mvm->trans->paging_download_buf);
        mvm->trans->paging_download_buf = NULL;
        mvm->trans->paging_db = NULL;

        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
        int sec_idx, idx;
        u32 offset = 0;

        /*
         * Find where the paging image starts:
         * if CPU2 exists and is in paging format, the image looks like this:
         *   CPU1 sections (2 or more)
         *   CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
         *      sections from the CPU2 sections
         *   CPU2 sections (not paged)
         *   PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
         *      CPU2 sections from the CPU2 paging sections
         *   CPU2 paging CSS
         *   CPU2 paging image (including instructions and data)
         */
        for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
                if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
                        sec_idx++;
                        break;
                }
        }

        /*
         * If paging is enabled there should be at least 2 more sections left
         * (one for the CSS and one for the paging data)
         */
        if (sec_idx >= image->num_sec - 1) {
                IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
                iwl_free_fw_paging(mvm);
                return -EINVAL;
        }

        /* copy the CSS block to the dram */
        IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
                     sec_idx);

        memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
               image->sec[sec_idx].data,
               mvm->fw_paging_db[0].fw_paging_size);
        dma_sync_single_for_device(mvm->trans->dev,
                                   mvm->fw_paging_db[0].fw_paging_phys,
                                   mvm->fw_paging_db[0].fw_paging_size,
                                   DMA_BIDIRECTIONAL);

        IWL_DEBUG_FW(mvm,
                     "Paging: copied %d CSS bytes to first block\n",
                     mvm->fw_paging_db[0].fw_paging_size);

        sec_idx++;

        /*
         * Copy the paging blocks to the dram.
         * The loop index starts from 1 because the CSS block (index 0) was
         * already copied to dram above.
         * The loop stops before the last block, which may not be full and
         * is therefore copied separately below.
         */
        for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
                struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

                memcpy(page_address(block->fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       block->fw_paging_size);
                dma_sync_single_for_device(mvm->trans->dev,
                                           block->fw_paging_phys,
                                           block->fw_paging_size,
                                           DMA_BIDIRECTIONAL);

                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d paging bytes to block %d\n",
                             mvm->fw_paging_db[idx].fw_paging_size,
                             idx);

                offset += mvm->fw_paging_db[idx].fw_paging_size;
        }

        /* copy the last paging block */
        if (mvm->num_of_pages_in_last_blk > 0) {
                struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

                memcpy(page_address(block->fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
                dma_sync_single_for_device(mvm->trans->dev,
                                           block->fw_paging_phys,
                                           block->fw_paging_size,
                                           DMA_BIDIRECTIONAL);

                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d pages in the last block %d\n",
                             mvm->num_of_pages_in_last_blk, idx);
        }

        return 0;
}

static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
                                   const struct fw_img *image)
{
        struct page *block;
        dma_addr_t phys = 0;
        int blk_idx, order, num_of_pages, size, dma_enabled;

        if (mvm->fw_paging_db[0].fw_paging_block)
                return 0;

        dma_enabled = is_device_dma_capable(mvm->trans->dev);

        /* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
        BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

        num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
        mvm->num_of_paging_blk =
                DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
        mvm->num_of_pages_in_last_blk =
                num_of_pages -
                NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

        IWL_DEBUG_FW(mvm,
                     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
                     mvm->num_of_paging_blk,
                     mvm->num_of_pages_in_last_blk);

        /* Allocate the CSS block and the paging blocks in dram */
        for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
                /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
                size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
                order = get_order(size);
                block = alloc_pages(GFP_KERNEL, order);
                if (!block) {
                        /* free all the previous pages since we failed */
                        iwl_free_fw_paging(mvm);
                        return -ENOMEM;
                }

                mvm->fw_paging_db[blk_idx].fw_paging_block = block;
                mvm->fw_paging_db[blk_idx].fw_paging_size = size;

                if (dma_enabled) {
                        phys = dma_map_page(mvm->trans->dev, block, 0,
                                            PAGE_SIZE << order,
                                            DMA_BIDIRECTIONAL);
                        if (dma_mapping_error(mvm->trans->dev, phys)) {
                                /*
                                 * free the previous pages and the current one
                                 * since we failed to map_page.
                                 */
                                iwl_free_fw_paging(mvm);
                                return -ENOMEM;
                        }
                        mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
                } else {
                        mvm->fw_paging_db[blk_idx].fw_paging_phys =
                                PAGING_ADDR_SIG |
                                blk_idx << BLOCK_2_EXP_SIZE;
                }

                if (!blk_idx)
                        IWL_DEBUG_FW(mvm,
                                     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
                                     order);
                else
                        IWL_DEBUG_FW(mvm,
                                     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
                                     order);
        }

        return 0;
}
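
/*
 * Worked example (illustrative): with 4K firmware pages and 8 pages
 * per block (32K blocks, per the debug messages above), an image with
 * paging_mem_size of 340K gives num_of_pages = 85, num_of_paging_blk =
 * DIV_ROUND_UP(85, 8) = 11, and num_of_pages_in_last_blk =
 * 85 - 8 * 10 = 5; block 0 is the 4K CSS block on top of those.
 */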

static int iwl_save_fw_paging(struct iwl_mvm *mvm,
                              const struct fw_img *fw)
{
        int ret;

        ret = iwl_alloc_fw_paging_mem(mvm, fw);
        if (ret)
                return ret;

        return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
        struct iwl_fw_paging_cmd paging_cmd = {
                .flags =
                        cpu_to_le32(PAGING_CMD_IS_SECURED |
                                    PAGING_CMD_IS_ENABLED |
                                    (mvm->num_of_pages_in_last_blk <<
                                    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
                .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
                .block_num = cpu_to_le32(mvm->num_of_paging_blk),
        };
        int blk_idx, size = sizeof(paging_cmd);

        /* A bit hard coded - but this is the old API and will be deprecated */
        if (!iwl_mvm_has_new_tx_api(mvm))
                size -= NUM_OF_FW_PAGING_BLOCKS * 4;

        /* loop over all paging blocks + the CSS block */
        for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
                dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;

                addr = addr >> PAGE_2_EXP_SIZE;

                if (iwl_mvm_has_new_tx_api(mvm)) {
                        __le64 phy_addr = cpu_to_le64(addr);

                        paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
                } else {
                        __le32 phy_addr = cpu_to_le32(addr);

                        paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
                }
        }

        return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
                                                    IWL_ALWAYS_LONG_GROUP, 0),
                                    0, size, &paging_cmd);
}
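
/*
 * Note: iwl_send_paging_cmd() reports each block's physical address as
 * a page number (the byte address shifted right by PAGE_2_EXP_SIZE),
 * not as a raw byte address; presumably PAGE_2_EXP_SIZE matches the
 * log2 of the 4K firmware page size.
 */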

/*
 * Send the FW_GET_ITEM_CMD to the FW to retrieve the paging item
 * (used when CPU2 has a paging image but the device is not DMA capable)
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
        int ret;
        struct iwl_fw_get_item_cmd fw_get_item_cmd = {
                .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
        };

        struct iwl_fw_get_item_resp *item_resp;
        struct iwl_host_cmd cmd = {
                .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
                .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &fw_get_item_cmd, },
        };

        cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret) {
                IWL_ERR(mvm,
                        "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
                        ret);
                return ret;
        }

        item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
        if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
                IWL_ERR(mvm,
                        "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
                        le32_to_cpu(item_resp->item_id));
                ret = -EIO;
                goto exit;
        }

        /* Add an extra page for headers */
        mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
                                                  FW_PAGING_SIZE,
                                                  GFP_KERNEL);
        if (!mvm->trans->paging_download_buf) {
                ret = -ENOMEM;
                goto exit;
        }
        mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
        mvm->trans->paging_db = mvm->fw_paging_db;
        IWL_DEBUG_FW(mvm,
                     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
                     mvm->trans->paging_req_addr);

exit:
        iwl_free_resp(&cmd);

        return ret;
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_alive_data *alive_data = data;
        struct mvm_alive_resp_v3 *palive3;
        struct mvm_alive_resp *palive;
        struct iwl_umac_alive *umac;
        struct iwl_lmac_alive *lmac1;
        struct iwl_lmac_alive *lmac2 = NULL;
        u16 status;

        if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
                palive = (void *)pkt->data;
                umac = &palive->umac_data;
                lmac1 = &palive->lmac_data[0];
                lmac2 = &palive->lmac_data[1];
                status = le16_to_cpu(palive->status);
        } else {
                palive3 = (void *)pkt->data;
                umac = &palive3->umac_data;
                lmac1 = &palive3->lmac_data;
                status = le16_to_cpu(palive3->status);
        }

        mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
        if (lmac2)
                mvm->error_event_table[1] =
                        le32_to_cpu(lmac2->error_event_table_ptr);
        mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
        mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
        mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);

        mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);

        alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
        alive_data->valid = status == IWL_ALIVE_STATUS_OK;
        if (mvm->umac_error_event_table)
                mvm->support_umac_log = true;

        IWL_DEBUG_FW(mvm,
                     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
                     status, lmac1->ver_type, lmac1->ver_subtype);

        if (lmac2)
                IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

        IWL_DEBUG_FW(mvm,
                     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                     le32_to_cpu(umac->umac_major),
                     le32_to_cpu(umac->umac_minor));

        return true;
}
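
/*
 * The alive handler above distinguishes firmware API versions purely
 * by payload size: the newer (CDB-capable) response carries two LMAC
 * blocks, while the older v3 response carries a single one.
 */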

static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
                                   struct iwl_rx_packet *pkt, void *data)
{
        WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

        return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
                                  struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_phy_db *phy_db = data;

        if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
                WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
                return true;
        }

        WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

        return false;
}

static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
{
        const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
        int ret;

        /*
         * Configure and operate fw paging mechanism.
         * The driver configures the paging flow only once.
         * The CPU2 paging image is included in the IWL_UCODE_INIT image.
         */
        if (!fw->paging_mem_size)
                return 0;

        /*
         * When DMA is not enabled, the driver needs to copy / write
         * the downloaded / uploaded page to / from the smem.
         * This gets the location of the place where the pages are
         * stored.
         */
        if (!is_device_dma_capable(mvm->trans->dev)) {
                ret = iwl_trans_get_paging_item(mvm);
                if (ret) {
                        IWL_ERR(mvm, "failed to get FW paging item\n");
                        return ret;
                }
        }

        ret = iwl_save_fw_paging(mvm, fw);
        if (ret) {
                IWL_ERR(mvm, "failed to save the FW paging image\n");
                return ret;
        }

        ret = iwl_send_paging_cmd(mvm, fw);
        if (ret) {
                IWL_ERR(mvm, "failed to send the paging cmd\n");
                iwl_free_fw_paging(mvm);
                return ret;
        }

        return 0;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
{
        struct iwl_notification_wait alive_wait;
        struct iwl_mvm_alive_data alive_data;
        const struct fw_img *fw;
        int ret, i;
        enum iwl_ucode_type old_type = mvm->cur_ucode;
        static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;

        if (ucode_type == IWL_UCODE_REGULAR &&
            iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
            !(fw_has_capa(&mvm->fw->ucode_capa,
                          IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
                fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
        else
                fw = iwl_get_ucode_image(mvm->fw, ucode_type);
        if (WARN_ON(!fw))
                return -EINVAL;
        mvm->cur_ucode = ucode_type;
        mvm->ucode_loaded = false;

        iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
                                   alive_cmd, ARRAY_SIZE(alive_cmd),
                                   iwl_alive_fn, &alive_data);

        ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
        if (ret) {
                mvm->cur_ucode = old_type;
                iwl_remove_notification(&mvm->notif_wait, &alive_wait);
                return ret;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret) {
                if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
                        IWL_ERR(mvm,
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                                iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
                                iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
                mvm->cur_ucode = old_type;
                return ret;
        }

        if (!alive_data.valid) {
                IWL_ERR(mvm, "Loaded ucode is not valid!\n");
                mvm->cur_ucode = old_type;
                return -EIO;
        }

        /*
         * Update the sdio allocation according to the pointer we get in the
         * alive notification.
         */
        st_fwrd_space.addr = mvm->sf_space.addr;
        st_fwrd_space.size = mvm->sf_space.size;
        ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
        if (ret) {
                IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
                return ret;
        }

        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

        /*
         * Note: all the queues are enabled as part of the interface
         * initialization, but in firmware restart scenarios they
         * could be stopped, so wake them up. In firmware restart,
         * mac80211 will have the queues stopped as well until the
         * reconfiguration completes. During normal startup, they
         * will be empty.
         */

        memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
        if (iwl_mvm_is_dqa_supported(mvm))
                mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
        else
                mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

        mvm->ucode_loaded = true;

        return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
        struct iwl_phy_cfg_cmd phy_cfg_cmd;
        enum iwl_ucode_type ucode_type = mvm->cur_ucode;

        /* Set parameters */
        phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
        phy_cfg_cmd.calib_control.event_trigger =
                mvm->fw->default_calib[ucode_type].event_trigger;
        phy_cfg_cmd.calib_control.flow_trigger =
                mvm->fw->default_calib[ucode_type].flow_trigger;

        IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
                       phy_cfg_cmd.phy_cfg);

        return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
                                    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
        struct iwl_notification_wait calib_wait;
        static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
                CALIB_RES_NOTIF_PHY_DB
        };
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON_ONCE(mvm->calibrating))
                return 0;

        iwl_init_notification_wait(&mvm->notif_wait,
                                   &calib_wait,
                                   init_complete,
                                   ARRAY_SIZE(init_complete),
                                   iwl_wait_phy_db_entry,
                                   mvm->phy_db);

        /* Will also start the device */
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
        if (ret) {
                IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
                goto error;
        }

        ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;

        /* Read the NVM only at driver load time, no need to do this twice */
        if (read_nvm) {
                /* Read nvm */
                ret = iwl_nvm_init(mvm, true);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        goto error;
                }
        }

        /* In case we read the NVM from external file, load it to the NIC */
        if (mvm->nvm_file_name)
                iwl_mvm_load_nvm_to_nic(mvm);

        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        WARN_ON(ret);

        /*
         * Abort after reading the nvm in case RF Kill is on; we will
         * complete the init sequence later, when RF kill switches off.
         */
        if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm,
                                  "jump over all phy activities due to RF kill\n");
                iwl_remove_notification(&mvm->notif_wait, &calib_wait);
                ret = 1;
                goto out;
        }

        mvm->calibrating = true;

        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;

        /*
         * Send phy configurations command to init uCode
         * to start the 16.0 uCode init image internal calibrations.
         */
        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret) {
                IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
                        ret);
                goto error;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the calibration complete notification.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                                    MVM_UCODE_CALIB_TIMEOUT);

        if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
                ret = 1;
        }
        goto out;

error:
        iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
        mvm->calibrating = false;
        if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
                /* we want to debug INIT and we have no NVM - fake */
                mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
                                        sizeof(struct ieee80211_channel) +
                                        sizeof(struct ieee80211_rate),
                                        GFP_KERNEL);
                if (!mvm->nvm_data)
                        return -ENOMEM;
                mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
                mvm->nvm_data->bands[0].n_channels = 1;
                mvm->nvm_data->bands[0].n_bitrates = 1;
                mvm->nvm_data->bands[0].bitrates =
                        (void *)mvm->nvm_data->channels + 1;
                mvm->nvm_data->bands[0].bitrates->hw_value = 10;
        }

        return ret;
}

int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
        struct iwl_notification_wait init_wait;
        struct iwl_nvm_access_complete_cmd nvm_complete = {};
        static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
        };
        int ret;

        lockdep_assert_held(&mvm->mutex);

        iwl_init_notification_wait(&mvm->notif_wait,
                                   &init_wait,
                                   init_complete,
                                   ARRAY_SIZE(init_complete),
                                   iwl_wait_init_complete,
                                   NULL);

        /* Will also start the device */
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
        if (ret) {
                IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
                goto error;
        }

        /* TODO: remove when integrating context info */
        ret = iwl_mvm_init_paging(mvm);
        if (ret) {
                IWL_ERR(mvm, "Failed to init paging: %d\n",
                        ret);
                goto error;
        }

        /* Read the NVM only at driver load time, no need to do this twice */
        if (read_nvm) {
                /* Read nvm */
                ret = iwl_nvm_init(mvm, true);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        goto error;
                }
        }

        /* In case we read the NVM from external file, load it to the NIC */
        if (mvm->nvm_file_name)
                iwl_mvm_load_nvm_to_nic(mvm);

        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        if (WARN_ON(ret))
                goto error;

        ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                                NVM_ACCESS_COMPLETE), 0,
                                   sizeof(nvm_complete), &nvm_complete);
        if (ret) {
                IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
                        ret);
                goto error;
        }

        /* We wait for the INIT complete notification */
        return iwl_wait_notification(&mvm->notif_wait, &init_wait,
                                     MVM_UCODE_ALIVE_TIMEOUT);

error:
        iwl_remove_notification(&mvm->notif_wait, &init_wait);
        return ret;
}

static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
                                          struct iwl_rx_packet *pkt)
{
        struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
        int i;

        mvm->shared_mem_cfg.num_txfifo_entries =
                ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
        for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
                mvm->shared_mem_cfg.txfifo_size[i] =
                        le32_to_cpu(mem_cfg->txfifo_size[i]);
        for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
                mvm->shared_mem_cfg.rxfifo_size[i] =
                        le32_to_cpu(mem_cfg->rxfifo_size[i]);

        BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
                     sizeof(mem_cfg->internal_txfifo_size));

        for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
             i++)
                mvm->shared_mem_cfg.internal_txfifo_size[i] =
                        le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}

static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
                                     struct iwl_rx_packet *pkt)
{
        struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
        int i;

        mvm->shared_mem_cfg.num_txfifo_entries =
                ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
        for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
                mvm->shared_mem_cfg.txfifo_size[i] =
                        le32_to_cpu(mem_cfg->txfifo_size[i]);
        for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
                mvm->shared_mem_cfg.rxfifo_size[i] =
                        le32_to_cpu(mem_cfg->rxfifo_size[i]);

        /* new API has more data, from rxfifo_addr field and on */
        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
                BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
                             sizeof(mem_cfg->internal_txfifo_size));

                for (i = 0;
                     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
                     i++)
                        mvm->shared_mem_cfg.internal_txfifo_size[i] =
                                le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
        }
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
        struct iwl_host_cmd cmd = {
                .flags = CMD_WANT_SKB,
                .data = { NULL, },
                .len = { 0, },
        };
        struct iwl_rx_packet *pkt;

        lockdep_assert_held(&mvm->mutex);

        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
                cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
        else
                cmd.id = SHARED_MEM_CFG;

        if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
                return;

        pkt = cmd.resp_pkt;
        if (iwl_mvm_has_new_tx_api(mvm))
                iwl_mvm_parse_shared_mem_a000(mvm, pkt);
        else
                iwl_mvm_parse_shared_mem(mvm, pkt);

        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

        iwl_free_resp(&cmd);
}

static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
        struct iwl_ltr_config_cmd cmd = {
                .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
        };

        if (!mvm->trans->ltr_enabled)
                return 0;

        return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
                                    sizeof(cmd), &cmd);
}
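
/*
 * LTR here is PCIe Latency Tolerance Reporting, which lets the device
 * advertise its latency requirements so the platform can pick deeper
 * power states; the command is skipped when the transport reports LTR
 * as disabled.
 */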

#define ACPI_WRDS_METHOD	"WRDS"
#define ACPI_WRDS_WIFI		(0x07)
#define ACPI_WRDS_TABLE_SIZE	10

struct iwl_mvm_sar_table {
        bool enabled;
        u8 values[ACPI_WRDS_TABLE_SIZE];
};

#ifdef CONFIG_ACPI
static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
                                struct iwl_mvm_sar_table *sar_table)
{
        union acpi_object *data_pkg;
        u32 i;

        /* We need at least two packages, one for the revision and one
         * for the data itself.  Also check that the revision is valid
         * (i.e. it is an integer set to 0).
         */
        if (wrds->type != ACPI_TYPE_PACKAGE ||
            wrds->package.count < 2 ||
            wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
            wrds->package.elements[0].integer.value != 0) {
                IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
                return -EINVAL;
        }

        /* loop through all the packages to find the one for WiFi */
        for (i = 1; i < wrds->package.count; i++) {
                union acpi_object *domain;

                data_pkg = &wrds->package.elements[i];

                /* Skip anything that is not a package with the right
                 * amount of elements (i.e. domain_type,
                 * enabled/disabled plus the sar table size).
                 */
                if (data_pkg->type != ACPI_TYPE_PACKAGE ||
                    data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
                        continue;

                domain = &data_pkg->package.elements[0];
                if (domain->type == ACPI_TYPE_INTEGER &&
                    domain->integer.value == ACPI_WRDS_WIFI)
                        break;

                data_pkg = NULL;
        }

        if (!data_pkg)
                return -ENOENT;

        if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
                return -EINVAL;

        sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);

        for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
                union acpi_object *entry;

                entry = &data_pkg->package.elements[i + 2];
                if ((entry->type != ACPI_TYPE_INTEGER) ||
                    (entry->integer.value > U8_MAX))
                        return -EINVAL;

                sar_table->values[i] = entry->integer.value;
        }

        return 0;
}
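
/*
 * Illustrative WRDS layout accepted by the parser above (reconstructed
 * from the checks in the code, not from a spec):
 *
 *	Package () {
 *		0,			// revision, must be 0
 *		Package () {
 *			0x07,		// domain: ACPI_WRDS_WIFI
 *			1,		// enabled flag
 *			v0, ..., v9	// ACPI_WRDS_TABLE_SIZE values,
 *					// each <= U8_MAX
 *		},
 *		// possibly more per-domain packages ...
 *	}
 */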

static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
                                 struct iwl_mvm_sar_table *sar_table)
{
        acpi_handle root_handle;
        acpi_handle handle;
        struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
        acpi_status status;
        int ret;

        root_handle = ACPI_HANDLE(mvm->dev);
        if (!root_handle) {
                IWL_DEBUG_RADIO(mvm,
                                "Could not retrieve root port ACPI handle\n");
                return -ENOENT;
        }

        /* Get the method's handle */
        status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
                                 &handle);
        if (ACPI_FAILURE(status)) {
                IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
                return -ENOENT;
        }

        /* Call WRDS with no arguments */
        status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
        if (ACPI_FAILURE(status)) {
                IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
                return -ENOENT;
        }

        ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
        kfree(wrds.pointer);

        return ret;
}
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
                                 struct iwl_mvm_sar_table *sar_table)
{
        return -ENOENT;
}
#endif /* CONFIG_ACPI */

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
        struct iwl_mvm_sar_table sar_table;
        struct iwl_dev_tx_power_cmd cmd = {
                .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
        };
        int ret, i, j, idx;
        int len = sizeof(cmd);

        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
                len = sizeof(cmd.v3);

        ret = iwl_mvm_sar_get_table(mvm, &sar_table);
        if (ret < 0) {
                IWL_DEBUG_RADIO(mvm,
                                "SAR BIOS table invalid or unavailable. (%d)\n",
                                ret);
                /* we don't fail if the table is not available */
                return 0;
        }

        if (!sar_table.enabled)
                return 0;

        IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

        BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
                     ACPI_WRDS_TABLE_SIZE);

        for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
                IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
                for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
                        idx = (i * IWL_NUM_SUB_BANDS) + j;
                        cmd.v3.per_chain_restriction[i][j] =
                                cpu_to_le16(sar_table.values[idx]);
                        IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
                                        j, sar_table.values[idx]);
                }
        }

        ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
        if (ret)
                IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);

        return ret;
}
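
/*
 * The flattened 10-entry SAR table maps to IWL_NUM_CHAIN_LIMITS rows
 * of IWL_NUM_SUB_BANDS entries each (enforced by the BUILD_BUG_ON
 * above), and, per the debug print, each value is a TX power limit in
 * units of 1/8 dBm.
 */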

static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
        int ret;

        if (iwl_mvm_has_new_tx_api(mvm))
                return iwl_run_unified_mvm_ucode(mvm, false);

        ret = iwl_run_init_mvm_ucode(mvm, false);

        if (iwlmvm_mod_params.init_dbg)
                return 0;

        if (ret) {
                IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
                /* this can't happen */
                if (WARN_ON(ret > 0))
                        ret = -ERFKILL;
                return ret;
        }

        /*
         * Stop and start the transport without entering low power
         * mode. This will save the state of other components on the
         * device that are triggered by the INIT firmware (MFUART).
         */
        _iwl_trans_stop_device(mvm->trans, false);
        ret = _iwl_trans_start_hw(mvm->trans, false);
        if (ret)
                return ret;

        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
        if (ret)
                return ret;

        return iwl_mvm_init_paging(mvm);
}
| 1171 | |
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

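	/*
	 * Query and cache the firmware's shared memory layout
	 * (consumed later by, e.g., the fw error dump code).
	 */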
	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send the PHY DB control command, then the PHY DB calibration data */
	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;

		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	/* Init RSS configuration */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

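	/* no TDLS channel-switch peer yet; IWL_MVM_STATION_COUNT marks it unused */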
	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Enable DQA-mode if required */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	} else {
		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
	}

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to hand responsibility for ct-kill and
		 * TX backoff over to the FW, we need to send an empty
		 * temperature reporting cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

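	/* configure LTR (latency tolerance reporting); a failure only warns */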
	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

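	/* UMAC scan: reset the cached scan type and (re)send the scan config */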
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

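	/* enable RX checksum offload when both the fw and the cfg allow it */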
	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

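	/* apply any BIOS-provided SAR TX power limits */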
	ret = iwl_mvm_sar_init(mvm);
	if (ret)
		goto error;

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

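	/* boot the dedicated WoWLAN firmware image for the D3 (suspend) flow */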
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send the PHY DB control command, then the PHY DB calibration data */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

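	/*
	 * image_size was appended to this notification later; older
	 * firmware sends a shorter payload, so check the length first.
	 */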
	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}