/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-calib.h"
#include "iwl-sta.h"

static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(const struct iwl_priv *priv);

/* Highest firmware API version supported */
#define IWL4965_UCODE_API_MAX 2

/* Lowest firmware API version supported */
#define IWL4965_UCODE_API_MIN 2

#define IWL4965_FW_PRE "iwlwifi-4965-"
#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)


/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
	.num_of_queues = IWL49_NUM_QUEUES,
	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};

/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "BSM uCode verification failed at "
				"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				BSM_SRAM_LOWER_BOUND,
				reg - BSM_SRAM_LOWER_BOUND, len,
				val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}

/**
 * iwl4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL. When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers. Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image. This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program. This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO("Begin load bsm\n");

	priv->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE:  iwl_init_alive_start() will replace these values,
	 *        after the "initialize" uCode has run, to point to
	 *        runtime/protocol instructions and backup data cache.
	 */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));

	ret = iwl4965_verify_bsm(priv);
	if (ret) {
		iwl_release_nic_access(priv);
		return ret;
	}

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
	iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 *   to prepare to load "initialize" uCode */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish */
	for (i = 0; i < 100; i++) {
		done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERR(priv, "BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 *   (e.g. when powering back up after power-save shutdown) */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	iwl_release_nic_access(priv);

	return 0;
}

/**
 * iwl4965_set_ucode_ptrs - Set uCode address location
 *
 * Tell initialization uCode where to find runtime uCode.
 *
 * BSM registers initially contain pointers to initialization uCode.
 * We need to replace them to load runtime uCode inst and data,
 * and to save runtime data when powering down.
 */
static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
	dma_addr_t pinst;
	dma_addr_t pdata;
	unsigned long flags;
	int ret = 0;

	/* bits 35:4 for 4965 */
	pinst = priv->ucode_code.p_addr >> 4;
	pdata = priv->ucode_data_backup.p_addr >> 4;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Tell bootstrap uCode where to find image to load */
	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
		       priv->ucode_data.len);

	/* Inst byte count must be last to set up, bit 31 signals uCode
	 *   that all new ptr/size info is in place */
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
		       priv->ucode_code.len | BSM_DRAM_INST_LOAD);
	iwl_release_nic_access(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");

	return ret;
}
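
/*
 * Illustrative note (not from the original driver): both iwl4965_load_bsm()
 * and iwl4965_set_ucode_ptrs() program the BSM_DRAM_*_PTR_REG registers with
 * the DMA address shifted right by 4, because the registers hold physical
 * address bits 35:4 only.  A minimal sketch of that packing, assuming a
 * hypothetical 36-bit DMA address:
 *
 *	dma_addr_t p_addr = 0x123456780;	/* 36-bit physical address */
 *	u32 reg_val = (u32)(p_addr >> 4);	/* 0x12345678, bits 35:4 */
 *	/* the low 4 bits must be zero, i.e. the buffer is 16-byte aligned */
 */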

/**
 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
 *
 * Called after REPLY_ALIVE notification received from "initialize" uCode.
 *
 * The 4965 "initialize" ALIVE reply contains calibration data for:
 *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
 *   (3945 does not contain this data).
 *
 * Tell "initialize" uCode to go ahead and load the runtime uCode.
 */
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO("Initialize Alive failed.\n");
		goto restart;
	}

	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded. */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	priv->temperature = iwl4965_hw_get_temperature(priv);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	IWL_DEBUG_INFO("Initialization Alive received.\n");
	if (iwl4965_set_ucode_ptrs(priv)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	queue_work(priv->workqueue, &priv->restart);
}

static int is_fat_channel(__le32 rxon_flags)
{
	return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
		(rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
}

/*
 * EEPROM handlers
 */
static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
{
	return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called under priv->lock and mac access
 */
static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
}

static int iwl4965_apm_init(struct iwl_priv *priv)
{
	int ret = 0;

	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* disable L0s without affecting L1 (don't wait for ICH L0s bug W/A) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO("Failed to init the card\n");
		goto out;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
		       APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);
out:
	return ret;
}


static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u32 val;
	u16 radio_cfg;
	u16 link;

	spin_lock_irqsave(&priv->lock, flags);

	if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);

	/* L1 is enabled by BIOS */
	if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
		/* L0S disabled, L1A enabled */
		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		/* L0S enabled, L1A disabled */
		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int iwl4965_apm_stop_master(struct iwl_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	iwl_poll_direct_bit(priv, CSR_RESET,
			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO("stop master\n");

	return 0;
}

static void iwl4965_apm_stop(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl4965_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);
	/* clear "init complete" to move adapter D0A* --> D0U state */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static int iwl4965_apm_reset(struct iwl_priv *priv)
{
	int ret = 0;
	unsigned long flags;

	iwl4965_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* FIXME: put here L1A -L0S w/a */

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0)
		goto out;

	udelay(10);

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;
	/* Enable DMA and BSM Clock */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
		       APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(10);

	/* disable L1A */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);

	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

out:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}

/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 *  ... once chain noise is calibrated the first time, it's good forever. */
static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
		struct iwl_calib_diff_gain_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				     sizeof(cmd), &cmd))
			IWL_ERR(priv,
				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
	}
}

static void iwl4965_gain_computation(struct iwl_priv *priv,
			u32 *average_noise,
			u16 min_average_noise_antenna_i,
			u32 min_average_noise)
{
	int i, ret;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	data->delta_gain_code[min_average_noise_antenna_i] = 0;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		s32 delta_g = 0;

		if (!(data->disconn_array[i]) &&
		    (data->delta_gain_code[i] ==
			     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
			delta_g = average_noise[i] - min_average_noise;
			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
			data->delta_gain_code[i] =
				min(data->delta_gain_code[i],
				    (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

			data->delta_gain_code[i] =
				(data->delta_gain_code[i] | (1 << 2));
		} else {
			data->delta_gain_code[i] = 0;
		}
	}
	IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
			data->delta_gain_code[0],
			data->delta_gain_code[1],
			data->delta_gain_code[2]);

	/* Differential gain gets sent to uCode only once */
	if (!data->radio_write) {
		struct iwl_calib_diff_gain_cmd cmd;
		data->radio_write = 1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = data->delta_gain_code[0];
		cmd.diff_gain_b = data->delta_gain_code[1];
		cmd.diff_gain_c = data->delta_gain_code[2];
		ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_DEBUG_CALIB("fail sending cmd "
					"REPLY_PHY_CALIBRATION_CMD\n");

		/* TODO: we might want to recalculate
		 * rx_chain in rxon cmd */

		/* Mark so we run this algo only once! */
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
	data->chain_noise_a = 0;
	data->chain_noise_b = 0;
	data->chain_noise_c = 0;
	data->chain_signal_a = 0;
	data->chain_signal_b = 0;
	data->chain_signal_c = 0;
	data->beacon_count = 0;
}
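
/*
 * Illustrative worked example (not from the original driver): with the
 * quietest antenna as reference, iwl4965_gain_computation() turns each
 * chain's excess noise into a delta-gain code.  Assuming, say,
 * average_noise = {90, 120, 105} and min_average_noise = 90:
 *
 *	chain 1: delta_g = 120 - 90 = 30  ->  code = 30 * 10 / 15 = 20,
 *	         clamped to CHAIN_NOISE_MAX_DELTA_GAIN_CODE, then OR'd with (1 << 2)
 *	chain 2: delta_g = 105 - 90 = 15  ->  code = 10, same post-processing
 *
 * The reference chain keeps code 0, and the three codes are sent once in a
 * REPLY_PHY_CALIBRATION_CMD (diff_gain_a/b/c).
 */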

static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
			__le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
	} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
	}
}

static void iwl4965_bg_txpower_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
			txpower_work);

	/* If a scan happened to start before we got here
	 * then just return; the statistics notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		return;

	mutex_lock(&priv->mutex);

	/* Regardless of whether we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	iwl4965_send_tx_power(priv);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	priv->last_temperature = priv->temperature;

	mutex_unlock(&priv->mutex);
}

/*
 * Acquire priv->lock before calling this function !
 */
static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}

/**
 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
 * @scd_retry: (1) Indicates queue will be used in aggregation mode
 *
 * NOTE: Acquire priv->lock before calling this function !
 */
static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		       IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}

static const u16 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_HCCA_1,
	IWL_TX_FIFO_HCCA_2
};

static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int ret;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
		iwl_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channels */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
		       (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);

	/* Map each Tx/cmd queue to its corresponding fifo */
	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];
		iwl_txq_ctx_activate(priv, i);
		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}

static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,
};

/**
 * iwl4965_hw_set_hw_params
 *
 * Called when initializing driver
 */
static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
{

	if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
		IWL_ERR(priv,
			"invalid queues_num, should be between %d and %d\n",
			IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
		return -EINVAL;
	}

	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
	priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	priv->hw_params.scd_bc_tbls_size =
			IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl);
	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
	priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
	priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);

	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;

	priv->hw_params.tx_chains_num = 2;
	priv->hw_params.rx_chains_num = 2;
	priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
	priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
	priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);

	priv->hw_params.sens = &iwl4965_sensitivity;

	return 0;
}

static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
	s32 sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	*res = 1;
	*res = ((num * 2 + denom) / (denom * 2)) * sign;

	return 1;
}
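
/*
 * Illustrative worked example (not from the original driver):
 * iwl4965_math_div_round() is a signed divide rounded to the nearest
 * integer, with halves rounded away from zero:
 *
 *	s32 res;
 *	iwl4965_math_div_round(7, 2, &res);	// res ==  4: (7*2 + 2) / (2*2) = 16/4
 *	iwl4965_math_div_round(-7, 2, &res);	// res == -4: sign restored after the unsigned math
 *	iwl4965_math_div_round(5, 3, &res);	// res ==  2: (5*2 + 3) / (3*2) = 13/6 -> 2
 */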

/**
 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
 *
 * Determines power supply voltage compensation for txpower calculations.
 * Returns number of 1/2-dB steps to subtract from gain table index,
 * to compensate for difference between power supply voltage during
 * factory measurements, vs. current power supply voltage.
 *
 * Voltage indication is higher for lower voltage.
 * Lower voltage requires more gain (lower gain table index).
 */
static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
					    s32 current_voltage)
{
	s32 comp = 0;

	if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
	    (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
		return 0;

	iwl4965_math_div_round(current_voltage - eeprom_voltage,
			       TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);

	if (current_voltage > eeprom_voltage)
		comp *= 2;
	if ((comp < -2) || (comp > 2))
		comp = 0;

	return comp;
}
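
/*
 * Illustrative worked example (not from the original driver), assuming a
 * hypothetical TX_POWER_IWL_VOLTAGE_CODES_PER_03V of 7 codes per 0.3 V step:
 *
 *	current_voltage - eeprom_voltage = 7
 *	  -> rounded divide: 7 / 7 = 1 half-dB step
 *	current_voltage > eeprom_voltage (higher reading = lower supply
 *	  voltage), so the step count is doubled: comp = 2, which is within
 *	  the +/-2 clamp and is applied to the gain table index.
 */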

static s32 iwl4965_get_tx_atten_grp(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;

	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;

	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;

	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;

	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;

	return -1;
}

static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
{
	s32 b = -1;

	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
		if (priv->calib_info->band_info[b].ch_from == 0)
			continue;

		if ((channel >= priv->calib_info->band_info[b].ch_from)
		    && (channel <= priv->calib_info->band_info[b].ch_to))
			break;
	}

	return b;
}

static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	s32 val;

	if (x2 == x1)
		return y1;
	else {
		iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
		return val + y2;
	}
}
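
/*
 * Illustrative worked example (not from the original driver): the helper
 * above is a rounded linear interpolation y(x) between two calibration
 * points (x1, y1) and (x2, y2).  For x1 = 36, y1 = 100, x2 = 64, y2 = 120
 * and x = 50:
 *
 *	val = round((x2 - x) * (y1 - y2) / (x2 - x1))
 *	    = round(14 * (-20) / 28) = -10
 *	result = val + y2 = -10 + 120 = 110
 *
 * i.e. a channel halfway between the two sample channels yields the
 * midpoint value.
 */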

/**
 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
 *
 * Interpolates factory measurements from the two sample channels within a
 * sub-band, to apply to channel of interest. Interpolation is proportional to
 * differences in channel frequencies, which is proportional to differences
 * in channel number.
 */
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
				    struct iwl_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct iwl_eeprom_calib_measure *m1;
	const struct iwl_eeprom_calib_measure *m2;
	struct iwl_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	s = iwl4965_get_sub_band(priv, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
		return -1;
	}

	ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
			  channel, s, ch_i1, ch_i2);

	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(priv->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(priv->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->actual_pow,
							   ch_i2,
							   m2->actual_pow);
			omeas->gain_idx =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->gain_idx, ch_i2,
							   m2->gain_idx);
			omeas->temperature =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->temperature,
							   ch_i2,
							   m2->temperature);
			omeas->pa_det =
			    (s8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->pa_det, ch_i2,
							   m2->pa_det);

			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
			     m1->actual_pow, m2->actual_pow, omeas->actual_pow);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
			     m1->gain_idx, m2->gain_idx, omeas->gain_idx);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
			     m1->pa_det, m2->pa_det, omeas->pa_det);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
			     m1->temperature, m2->temperature,
			     omeas->temperature);
		}
	}

	return 0;
}

/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};

/* Thermal compensation values for txpower for various frequency ranges ...
 *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;
	s32 degrees_per_05db_a_denom;
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch  34-43 */
	{4, 1},			/* group 1 5.2, ch  44-70 */
	{4, 1},			/* group 2 5.2, ch  71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch   all */
};
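
/*
 * Illustrative note (not from the original driver): each entry above is a
 * ratio of degrees Celsius per half-dB of gain adjustment, kept as a
 * numerator/denominator pair so the rounded integer divide in
 * iwl4965_fill_txpower_tbl() can use it directly.  For example, group 0 is
 * {9, 2} = 4.5 degrees per half-dB step: if the current temperature is 18
 * degrees above the factory calibration temperature, the compensation is
 * round(18 * 2 / 9) = 4 half-dB steps (2 dB) for that channel group.
 */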

static s32 get_min_power_index(s32 rate_power_index, u32 band)
{
	if (!band) {
		if ((rate_power_index & 7) <= 4)
			return MIN_TX_GAIN_INDEX_52GHZ_EXT;
	}
	return MIN_TX_GAIN_INDEX;
}

struct gain_entry {
	u8 dsp;
	u8 radio;
};

static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
1280
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001281static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
Zhu Yib481de92007-09-25 17:54:57 -07001282 u8 is_fat, u8 ctrl_chan_high,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001283 struct iwl4965_tx_power_db *tx_power_tbl)
Zhu Yib481de92007-09-25 17:54:57 -07001284{
1285 u8 saturation_power;
1286 s32 target_power;
1287 s32 user_target_power;
1288 s32 power_limit;
1289 s32 current_temp;
1290 s32 reg_limit;
1291 s32 current_regulatory;
1292 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1293 int i;
1294 int c;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001295 const struct iwl_channel_info *ch_info = NULL;
Tomas Winkler073d3f52008-04-21 15:41:52 -07001296 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
1297 const struct iwl_eeprom_calib_measure *measurement;
Zhu Yib481de92007-09-25 17:54:57 -07001298 s16 voltage;
1299 s32 init_voltage;
1300 s32 voltage_compensation;
1301 s32 degrees_per_05db_num;
1302 s32 degrees_per_05db_denom;
1303 s32 factory_temp;
1304 s32 temperature_comp[2];
1305 s32 factory_gain_index[2];
1306 s32 factory_actual_pwr[2];
1307 s32 power_index;
1308
Zhu Yib481de92007-09-25 17:54:57 -07001309 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
1310 * are used for indexing into txpower table) */
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001311 user_target_power = 2 * priv->tx_power_user_lmt;
Zhu Yib481de92007-09-25 17:54:57 -07001312
1313 /* Get current (RXON) channel, band, width */
Zhu Yib481de92007-09-25 17:54:57 -07001314 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
1315 is_fat);
1316
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001317 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1318
1319 if (!is_channel_valid(ch_info))
Zhu Yib481de92007-09-25 17:54:57 -07001320 return -EINVAL;
1321
1322 /* get txatten group, used to select 1) thermal txpower adjustment
1323 * and 2) mimo txpower balance between Tx chains. */
1324 txatten_grp = iwl4965_get_tx_atten_grp(channel);
Samuel Ortiza3139c52008-12-19 10:37:09 +08001325 if (txatten_grp < 0) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08001326 IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
Samuel Ortiza3139c52008-12-19 10:37:09 +08001327 channel);
Zhu Yib481de92007-09-25 17:54:57 -07001328 return -EINVAL;
Samuel Ortiza3139c52008-12-19 10:37:09 +08001329 }
Zhu Yib481de92007-09-25 17:54:57 -07001330
1331 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
1332 channel, txatten_grp);
1333
1334 if (is_fat) {
1335 if (ctrl_chan_high)
1336 channel -= 2;
1337 else
1338 channel += 2;
1339 }
1340
1341 /* hardware txpower limits ...
1342 * saturation (clipping distortion) txpowers are in half-dBm */
1343 if (band)
Tomas Winkler073d3f52008-04-21 15:41:52 -07001344 saturation_power = priv->calib_info->saturation_power24;
Zhu Yib481de92007-09-25 17:54:57 -07001345 else
Tomas Winkler073d3f52008-04-21 15:41:52 -07001346 saturation_power = priv->calib_info->saturation_power52;
Zhu Yib481de92007-09-25 17:54:57 -07001347
1348 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
1349 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
1350 if (band)
1351 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
1352 else
1353 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
1354 }
1355
1356 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1357 * max_power_avg values are in dBm, convert * 2 */
1358 if (is_fat)
1359 reg_limit = ch_info->fat_max_power_avg * 2;
1360 else
1361 reg_limit = ch_info->max_power_avg * 2;
1362
1363 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
1364 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
1365 if (band)
1366 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
1367 else
1368 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
1369 }
1370
1371 /* Interpolate txpower calibration values for this channel,
1372 * based on factory calibration tests on spaced channels. */
1373 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
1374
1375 /* calculate tx gain adjustment based on power supply voltage */
Tomas Winkler073d3f52008-04-21 15:41:52 -07001376 voltage = priv->calib_info->voltage;
Zhu Yib481de92007-09-25 17:54:57 -07001377 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
1378 voltage_compensation =
1379 iwl4965_get_voltage_compensation(voltage, init_voltage);
1380
1381 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
1382 init_voltage,
1383 voltage, voltage_compensation);
1384
1385 /* get current temperature (Celsius) */
1386 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
1387 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
1388 current_temp = KELVIN_TO_CELSIUS(current_temp);
1389
1390 /* select thermal txpower adjustment params, based on channel group
1391 * (same frequency group used for mimo txatten adjustment) */
1392 degrees_per_05db_num =
1393 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1394 degrees_per_05db_denom =
1395 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1396
1397 /* get per-chain txpower values from factory measurements */
1398 for (c = 0; c < 2; c++) {
1399 measurement = &ch_eeprom_info.measurements[c][1];
1400
1401 /* txgain adjustment (in half-dB steps) based on difference
1402 * between factory and current temperature */
1403 factory_temp = measurement->temperature;
1404 iwl4965_math_div_round((current_temp - factory_temp) *
1405 degrees_per_05db_denom,
1406 degrees_per_05db_num,
1407 &temperature_comp[c]);
1408
1409 factory_gain_index[c] = measurement->gain_idx;
1410 factory_actual_pwr[c] = measurement->actual_pow;
1411
1412 IWL_DEBUG_TXPOWER("chain = %d\n", c);
1413 IWL_DEBUG_TXPOWER("fctry tmp %d, "
1414 "curr tmp %d, comp %d steps\n",
1415 factory_temp, current_temp,
1416 temperature_comp[c]);
1417
1418 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
1419 factory_gain_index[c],
1420 factory_actual_pwr[c]);
1421 }
1422
1423 /* for each of 33 bit-rates (including 1 for CCK) */
1424 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1425 u8 is_mimo_rate;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001426 union iwl4965_tx_power_dual_stream tx_power;
Zhu Yib481de92007-09-25 17:54:57 -07001427
1428 /* for mimo, reduce each chain's txpower by half
1429 * (3dB, 6 steps), so total output power is regulatory
1430 * compliant. */
1431 if (i & 0x8) {
1432 current_regulatory = reg_limit -
1433 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1434 is_mimo_rate = 1;
1435 } else {
1436 current_regulatory = reg_limit;
1437 is_mimo_rate = 0;
1438 }
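 		/* with i & 0x8 set (entries 8-15 and 24-31) this is a
 		 * two-chain (MIMO) rate, so each chain is backed off by the
 		 * compensation constant (3 dB = 6 half-dB steps per the
 		 * comment above) and the combined output of both chains
 		 * stays at the regulatory limit */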
1439
1440 /* find txpower limit, either hardware or regulatory */
1441 power_limit = saturation_power - back_off_table[i];
1442 if (power_limit > current_regulatory)
1443 power_limit = current_regulatory;
1444
1445 /* reduce user's txpower request if necessary
1446 * for this rate on this channel */
1447 target_power = user_target_power;
1448 if (target_power > power_limit)
1449 target_power = power_limit;
1450
1451 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
1452 i, saturation_power - back_off_table[i],
1453 current_regulatory, user_target_power,
1454 target_power);
1455
1456 /* for each of 2 Tx chains (radio transmitters) */
1457 for (c = 0; c < 2; c++) {
1458 s32 atten_value;
1459
1460 if (is_mimo_rate)
1461 atten_value =
1462 (s32)le32_to_cpu(priv->card_alive_init.
1463 tx_atten[txatten_grp][c]);
1464 else
1465 atten_value = 0;
1466
1467 /* calculate index; higher index means lower txpower */
1468 power_index = (u8) (factory_gain_index[c] -
1469 (target_power -
1470 factory_actual_pwr[c]) -
1471 temperature_comp[c] -
1472 voltage_compensation +
1473 atten_value);
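 			/* worked example, illustrative numbers only:
 			 * factory index 72 measured at 30 half-dBm (15 dBm),
 			 * target 24 half-dBm (12 dBm), temperature comp -2,
 			 * no voltage comp or attenuation:
 			 *   72 - (24 - 30) - (-2) = 80
 			 * i.e. 8 steps (about 4 dB) below the factory point,
 			 * since each index step is roughly half a dB */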
1474
1475/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
1476 power_index); */
1477
1478 if (power_index < get_min_power_index(i, band))
1479 power_index = get_min_power_index(i, band);
1480
1481 /* adjust 5 GHz index to support negative indexes */
1482 if (!band)
1483 power_index += 9;
1484
1485 /* CCK, rate 32, reduce txpower for CCK */
1486 if (i == POWER_TABLE_CCK_ENTRY)
1487 power_index +=
1488 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1489
1490 /* stay within the table! */
1491 if (power_index > 107) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001492 IWL_WARN(priv, "txpower index %d > 107\n",
Zhu Yib481de92007-09-25 17:54:57 -07001493 power_index);
1494 power_index = 107;
1495 }
1496 if (power_index < 0) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001497 IWL_WARN(priv, "txpower index %d < 0\n",
Zhu Yib481de92007-09-25 17:54:57 -07001498 power_index);
1499 power_index = 0;
1500 }
1501
1502 /* fill txpower command for this rate/chain */
1503 tx_power.s.radio_tx_gain[c] =
1504 gain_table[band][power_index].radio;
1505 tx_power.s.dsp_predis_atten[c] =
1506 gain_table[band][power_index].dsp;
1507
1508 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
1509 "gain 0x%02x dsp %d\n",
1510 c, atten_value, power_index,
1511 tx_power.s.radio_tx_gain[c],
1512 tx_power.s.dsp_predis_atten[c]);
Tomas Winkler3ac7f142008-07-21 02:40:14 +03001513 } /* for each chain */
Zhu Yib481de92007-09-25 17:54:57 -07001514
1515 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1516
Tomas Winkler3ac7f142008-07-21 02:40:14 +03001517 } /* for each rate */
Zhu Yib481de92007-09-25 17:54:57 -07001518
1519 return 0;
1520}
1521
1522/**
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001523 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07001524 *
1525 * Uses the active RXON for channel, band, and characteristics (fat, high)
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001526 * The power limit is taken from priv->tx_power_user_lmt.
Zhu Yib481de92007-09-25 17:54:57 -07001527 */
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001528static int iwl4965_send_tx_power(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001529{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001530 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07001531 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001532 u8 band = 0;
1533 u8 is_fat = 0;
1534 u8 ctrl_chan_high = 0;
1535
1536 if (test_bit(STATUS_SCANNING, &priv->status)) {
1537 /* If this gets hit a lot, switch it to a BUG() and catch
1538 * the stack trace to find out who is calling this during
1539 * a scan. */
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001540 IWL_WARN(priv, "TX Power requested while scanning!\n");
Zhu Yib481de92007-09-25 17:54:57 -07001541 return -EAGAIN;
1542 }
1543
Johannes Berg8318d782008-01-24 19:38:38 +01001544 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001545
1546 is_fat = is_fat_channel(priv->active_rxon.flags);
1547
1548 if (is_fat &&
1549 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1550 ctrl_chan_high = 1;
1551
1552 cmd.band = band;
1553 cmd.channel = priv->active_rxon.channel;
1554
Tomas Winkler857485c2008-03-21 13:53:44 -07001555 ret = iwl4965_fill_txpower_tbl(priv, band,
Zhu Yib481de92007-09-25 17:54:57 -07001556 le16_to_cpu(priv->active_rxon.channel),
1557 is_fat, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07001558 if (ret)
1559 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07001560
Tomas Winkler857485c2008-03-21 13:53:44 -07001561 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1562
1563out:
1564 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001565}
1566
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001567static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1568{
1569 int ret = 0;
1570 struct iwl4965_rxon_assoc_cmd rxon_assoc;
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08001571 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1572 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001573
1574 if ((rxon1->flags == rxon2->flags) &&
1575 (rxon1->filter_flags == rxon2->filter_flags) &&
1576 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1577 (rxon1->ofdm_ht_single_stream_basic_rates ==
1578 rxon2->ofdm_ht_single_stream_basic_rates) &&
1579 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1580 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1581 (rxon1->rx_chain == rxon2->rx_chain) &&
1582 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1583 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1584 return 0;
1585 }
1586
1587 rxon_assoc.flags = priv->staging_rxon.flags;
1588 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1589 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1590 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1591 rxon_assoc.reserved = 0;
1592 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1593 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1594 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1595 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1596 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1597
1598 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1599 sizeof(rxon_assoc), &rxon_assoc, NULL);
1602
1603 return ret;
1604}
1605
Zhu Yi3c935522008-09-03 11:26:57 +08001606#ifdef IEEE80211_CONF_CHANNEL_SWITCH
Emmanuel Grumbacha33c2f42008-09-03 11:26:56 +08001607static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07001608{
1609 int rc;
1610 u8 band = 0;
1611 u8 is_fat = 0;
1612 u8 ctrl_chan_high = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001613 struct iwl4965_channel_switch_cmd cmd = { 0 };
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001614 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07001615
Johannes Berg8318d782008-01-24 19:38:38 +01001616 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001617
Assaf Krauss8622e702008-03-21 13:53:43 -07001618 ch_info = iwl_get_channel_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07001619
1620 is_fat = is_fat_channel(priv->staging_rxon.flags);
1621
1622 if (is_fat &&
1623 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1624 ctrl_chan_high = 1;
1625
1626 cmd.band = band;
1627 cmd.expect_beacon = 0;
1628 cmd.channel = cpu_to_le16(channel);
1629 cmd.rxon_flags = priv->active_rxon.flags;
1630 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
1631 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1632 if (ch_info)
1633 cmd.expect_beacon = is_channel_radar(ch_info);
1634 else
1635 cmd.expect_beacon = 1;
1636
1637 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
1638 ctrl_chan_high, &cmd.tx_power);
1639 if (rc) {
1640 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
1641 return rc;
1642 }
1643
Tomas Winkler857485c2008-03-21 13:53:44 -07001644 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07001645 return rc;
1646}
Zhu Yi3c935522008-09-03 11:26:57 +08001647#endif
Zhu Yib481de92007-09-25 17:54:57 -07001648
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001649/**
Tomas Winklere2a722e2008-04-14 21:16:10 -07001650 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001651 */
Tomas Winklere2a722e2008-04-14 21:16:10 -07001652static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
Ron Rindjunsky16466902008-05-05 10:22:50 +08001653 struct iwl_tx_queue *txq,
Tomas Winklere2a722e2008-04-14 21:16:10 -07001654 u16 byte_cnt)
Zhu Yib481de92007-09-25 17:54:57 -07001655{
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001656 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
Tomas Winkler127901a2008-10-23 23:48:55 -07001657 int txq_id = txq->q.id;
1658 int write_ptr = txq->q.write_ptr;
1659 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1660 __le16 bc_ent;
Zhu Yib481de92007-09-25 17:54:57 -07001661
Tomas Winkler127901a2008-10-23 23:48:55 -07001662 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
Zhu Yib481de92007-09-25 17:54:57 -07001663
Tomas Winkler127901a2008-10-23 23:48:55 -07001664 bc_ent = cpu_to_le16(len & 0xFFF);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001665 /* Set up byte count within first 256 entries */
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001666 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
Zhu Yib481de92007-09-25 17:54:57 -07001667
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001668 /* If within first 64 entries, duplicate at end */
Tomas Winkler127901a2008-10-23 23:48:55 -07001669 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001670 scd_bc_tbl[txq_id].
Tomas Winkler127901a2008-10-23 23:48:55 -07001671 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
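 	/* the byte-count table has TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP
 	 * (256 + 64) entries; mirroring the first 64 after entry 255 lets the
 	 * scheduler read a contiguous run of byte counts across the queue
 	 * wrap point without special-casing the wrap (assumed rationale) */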
Zhu Yib481de92007-09-25 17:54:57 -07001672}
1673
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001674/**
Zhu Yib481de92007-09-25 17:54:57 -07001675 * sign_extend - Sign extend a value using specified bit as sign-bit
1676 *
1677 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
1678 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
1679 *
1680 * @param oper value to sign extend
1681 * @param index 0 based bit index (0<=index<32) to sign bit
1682 */
1683static s32 sign_extend(u32 oper, int index)
1684{
1685 u8 shift = 31 - index;
1686
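 	/* note: relies on arithmetic (sign-preserving) right shift of a
 	 * signed value, which is implementation-defined in C but holds for
 	 * the compilers used to build the kernel */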
1687 return (s32)(oper << shift) >> shift;
1688}
1689
1690/**
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001691 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
Zhu Yib481de92007-09-25 17:54:57 -07001692 * @priv: driver private data; the temperature reading comes from priv->statistics (uCode)
1693 *
1694 * A return of <0 indicates bogus data in the statistics
1695 */
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001696static int iwl4965_hw_get_temperature(const struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001697{
1698 s32 temperature;
1699 s32 vt;
1700 s32 R1, R2, R3;
1701 u32 R4;
1702
1703 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1704 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
1705 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
1706 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1707 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1708 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1709 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1710 } else {
1711 IWL_DEBUG_TEMP("Running temperature calibration\n");
1712 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1713 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1714 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1715 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1716 }
1717
1718 /*
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001719 * Temperature is only 23 bits, so sign extend out to 32.
Zhu Yib481de92007-09-25 17:54:57 -07001720 *
1721 * NOTE If we haven't received a statistics notification yet
1722 * with an updated temperature, use R4 provided to us in the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001723 * "initialize" ALIVE response.
1724 */
Zhu Yib481de92007-09-25 17:54:57 -07001725 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1726 vt = sign_extend(R4, 23);
1727 else
1728 vt = sign_extend(
1729 le32_to_cpu(priv->statistics.general.temperature), 23);
1730
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001731 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d vt: %d\n", R1, R2, R3, vt);
Zhu Yib481de92007-09-25 17:54:57 -07001732
1733 if (R3 == R1) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08001734 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
Zhu Yib481de92007-09-25 17:54:57 -07001735 return -1;
1736 }
1737
1738 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1739 * Add offset to center the adjustment around 0 degrees Centigrade. */
1740 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1741 temperature /= (R3 - R1);
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001742 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
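 	/* i.e. temperature ~= 0.97 * TEMPERATURE_CALIB_A_VAL * (vt - R2) /
 	 * (R3 - R1) + TEMPERATURE_CALIB_KELVIN_OFFSET, with vt the raw
 	 * reading and R1-R3 the factory values selected above from the
 	 * "initialize" ALIVE response */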
Zhu Yib481de92007-09-25 17:54:57 -07001743
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001744 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n",
1745 temperature, KELVIN_TO_CELSIUS(temperature));
Zhu Yib481de92007-09-25 17:54:57 -07001746
1747 return temperature;
1748}
1749
1750/* Adjust Txpower only if temperature variance is greater than threshold. */
1751#define IWL_TEMPERATURE_THRESHOLD 3
1752
1753/**
1754 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1755 *
 1756 * If the temperature has changed sufficiently, then a recalibration
1757 * is needed.
1758 *
1759 * Assumes caller will replace priv->last_temperature once calibration
1760 * executed.
1761 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001762static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001763{
1764 int temp_diff;
1765
1766 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
1767 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
1768 return 0;
1769 }
1770
1771 temp_diff = priv->temperature - priv->last_temperature;
1772
1773 /* get absolute value */
1774 if (temp_diff < 0) {
 1775 		IWL_DEBUG_POWER("Getting cooler, delta %d\n", temp_diff);
 1776 		temp_diff = -temp_diff;
 1777 	} else if (temp_diff == 0)
 1778 		IWL_DEBUG_POWER("Same temp\n");
 1779 	else
 1780 		IWL_DEBUG_POWER("Getting warmer, delta %d\n", temp_diff);
1781
1782 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1783 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
1784 return 0;
1785 }
1786
1787 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
1788
1789 return 1;
1790}
1791
Zhu Yi52256402008-06-30 17:23:31 +08001792static void iwl4965_temperature_calib(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001793{
Zhu Yib481de92007-09-25 17:54:57 -07001794 s32 temp;
Zhu Yib481de92007-09-25 17:54:57 -07001795
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001796 temp = iwl4965_hw_get_temperature(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001797 if (temp < 0)
1798 return;
1799
1800 if (priv->temperature != temp) {
1801 if (priv->temperature)
1802 IWL_DEBUG_TEMP("Temperature changed "
1803 "from %dC to %dC\n",
1804 KELVIN_TO_CELSIUS(priv->temperature),
1805 KELVIN_TO_CELSIUS(temp));
1806 else
1807 IWL_DEBUG_TEMP("Temperature "
1808 "initialized to %dC\n",
1809 KELVIN_TO_CELSIUS(temp));
1810 }
1811
1812 priv->temperature = temp;
1813 set_bit(STATUS_TEMPERATURE, &priv->status);
1814
Emmanuel Grumbach203566f2008-06-12 09:46:54 +08001815 if (!priv->disable_tx_power_cal &&
1816 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1817 iwl4965_is_temp_calib_needed(priv))
Zhu Yib481de92007-09-25 17:54:57 -07001818 queue_work(priv->workqueue, &priv->txpower_work);
1819}
1820
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001821/**
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001822 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
1823 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001824static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001825 u16 txq_id)
1826{
1827 /* Simply stop the queue, but don't change any configuration;
1828 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001829 iwl_write_prph(priv,
Tomas Winkler12a81f62008-04-03 16:05:20 -07001830 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001831 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1832 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001833}
1834
1835/**
Ron Rindjunsky7f3e4bb2008-06-12 09:46:55 +08001836 * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
Ron Rindjunskyb095d032008-03-06 17:36:56 -08001837 * priv->lock must be held by the caller
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001838 */
Tomas Winkler30e553e2008-05-29 16:35:16 +08001839static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1840 u16 ssn_idx, u8 tx_fifo)
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001841{
Ron Rindjunskyb095d032008-03-06 17:36:56 -08001842 int ret = 0;
1843
Tomas Winkler9f17b312008-07-11 11:53:35 +08001844 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1845 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001846 IWL_WARN(priv,
1847 "queue number out of range: %d, must be %d to %d\n",
Tomas Winkler9f17b312008-07-11 11:53:35 +08001848 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1849 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001850 return -EINVAL;
1851 }
1852
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001853 ret = iwl_grab_nic_access(priv);
Ron Rindjunskyb095d032008-03-06 17:36:56 -08001854 if (ret)
1855 return ret;
1856
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001857 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1858
Tomas Winkler12a81f62008-04-03 16:05:20 -07001859 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001860
1861 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1862 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 1863 	/* assumes that ssn_idx is valid (!= 0xFFF) */
1864 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1865
Tomas Winkler12a81f62008-04-03 16:05:20 -07001866 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Ron Rindjunsky36470742008-05-15 13:54:10 +08001867 iwl_txq_ctx_deactivate(priv, txq_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001868 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
1869
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001870 iwl_release_nic_access(priv);
Ron Rindjunskyb095d032008-03-06 17:36:56 -08001871
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001872 return 0;
1873}
1874
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001875/**
1876 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
1877 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001878static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07001879 u16 txq_id)
1880{
1881 u32 tbl_dw_addr;
1882 u32 tbl_dw;
1883 u16 scd_q2ratid;
1884
Tomas Winkler30e553e2008-05-29 16:35:16 +08001885 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
Zhu Yib481de92007-09-25 17:54:57 -07001886
1887 tbl_dw_addr = priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001888 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
Zhu Yib481de92007-09-25 17:54:57 -07001889
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001890 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07001891
1892 if (txq_id & 0x1)
1893 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1894 else
1895 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
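 	/* each 32-bit word of the translate table holds two queues' RA/TID
 	 * mappings: odd-numbered queues in the high half-word, even-numbered
 	 * in the low half-word; the read-modify-write here preserves the
 	 * neighbouring queue's entry */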
1896
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001897 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07001898
1899 return 0;
1900}
1901
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001902
Zhu Yib481de92007-09-25 17:54:57 -07001903/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001904 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
1905 *
Ron Rindjunsky7f3e4bb2008-06-12 09:46:55 +08001906 * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001907 * i.e. it must be one of the higher queues used for aggregation
Zhu Yib481de92007-09-25 17:54:57 -07001908 */
Tomas Winkler30e553e2008-05-29 16:35:16 +08001909static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1910 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
Zhu Yib481de92007-09-25 17:54:57 -07001911{
1912 unsigned long flags;
Tomas Winkler30e553e2008-05-29 16:35:16 +08001913 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001914 u16 ra_tid;
1915
Tomas Winkler9f17b312008-07-11 11:53:35 +08001916 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1917 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001918 IWL_WARN(priv,
1919 "queue number out of range: %d, must be %d to %d\n",
Tomas Winkler9f17b312008-07-11 11:53:35 +08001920 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1921 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
1922 return -EINVAL;
1923 }
Zhu Yib481de92007-09-25 17:54:57 -07001924
1925 ra_tid = BUILD_RAxTID(sta_id, tid);
1926
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001927 /* Modify device's station table to Tx this TID */
Tomas Winkler9f586712008-11-12 13:14:05 -08001928 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
Zhu Yib481de92007-09-25 17:54:57 -07001929
1930 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler30e553e2008-05-29 16:35:16 +08001931 ret = iwl_grab_nic_access(priv);
1932 if (ret) {
Zhu Yib481de92007-09-25 17:54:57 -07001933 spin_unlock_irqrestore(&priv->lock, flags);
Tomas Winkler30e553e2008-05-29 16:35:16 +08001934 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001935 }
1936
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001937 /* Stop this Tx queue before configuring it */
Zhu Yib481de92007-09-25 17:54:57 -07001938 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1939
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001940 /* Map receiver-address / traffic-ID to this queue */
Zhu Yib481de92007-09-25 17:54:57 -07001941 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
1942
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001943 /* Set this queue as a chain-building queue */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001944 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07001945
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001946 /* Place first TFD at index corresponding to start sequence number.
1947 * Assumes that ssn_idx is valid (!= 0xFFF) */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08001948 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1949 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
Zhu Yib481de92007-09-25 17:54:57 -07001950 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1951
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001952 /* Set up Tx window size and frame limit for this queue */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001953 iwl_write_targ_mem(priv,
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001954 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
1955 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1956 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001957
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001958 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001959 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1960 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
1961 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001962
Tomas Winkler12a81f62008-04-03 16:05:20 -07001963 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07001964
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001965 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
Zhu Yib481de92007-09-25 17:54:57 -07001966 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
1967
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001968 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001969 spin_unlock_irqrestore(&priv->lock, flags);
1970
1971 return 0;
1972}
1973
Tomas Winkler133636d2008-05-05 10:22:34 +08001974
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08001975static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1976{
1977 switch (cmd_id) {
1978 case REPLY_RXON:
1979 return (u16) sizeof(struct iwl4965_rxon_cmd);
1980 default:
1981 return len;
1982 }
1983}
1984
Tomas Winkler133636d2008-05-05 10:22:34 +08001985static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
1986{
1987 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1988 addsta->mode = cmd->mode;
1989 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1990 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
1991 addsta->station_flags = cmd->station_flags;
1992 addsta->station_flags_msk = cmd->station_flags_msk;
1993 addsta->tid_disable_tx = cmd->tid_disable_tx;
1994 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1995 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1996 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1997 addsta->reserved1 = __constant_cpu_to_le16(0);
1998 addsta->reserved2 = __constant_cpu_to_le32(0);
1999
2000 return (u16)sizeof(struct iwl4965_addsta_cmd);
2001}
Tomas Winklerf20217d2008-05-29 16:35:10 +08002002
Tomas Winklerf20217d2008-05-29 16:35:10 +08002003static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
2004{
Tomas Winkler25a65722008-06-12 09:47:07 +08002005 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002006}
2007
2008/**
Tomas Winklera96a27f2008-10-23 23:48:56 -07002009 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
Tomas Winklerf20217d2008-05-29 16:35:10 +08002010 */
2011static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2012 struct iwl_ht_agg *agg,
Tomas Winkler25a65722008-06-12 09:47:07 +08002013 struct iwl4965_tx_resp *tx_resp,
2014 int txq_id, u16 start_idx)
Tomas Winklerf20217d2008-05-29 16:35:10 +08002015{
2016 u16 status;
Tomas Winkler25a65722008-06-12 09:47:07 +08002017 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002018 struct ieee80211_tx_info *info = NULL;
2019 struct ieee80211_hdr *hdr = NULL;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002020 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
Tomas Winkler25a65722008-06-12 09:47:07 +08002021 int i, sh, idx;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002022 u16 seq;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002023 if (agg->wait_for_ba)
2024 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
2025
2026 agg->frame_count = tx_resp->frame_count;
2027 agg->start_idx = start_idx;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002028 agg->rate_n_flags = rate_n_flags;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002029 agg->bitmap = 0;
2030
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002031 /* num frames attempted by Tx command */
Tomas Winklerf20217d2008-05-29 16:35:10 +08002032 if (agg->frame_count == 1) {
2033 /* Only one frame was attempted; no block-ack will arrive */
2034 status = le16_to_cpu(frame_status[0].status);
Tomas Winkler25a65722008-06-12 09:47:07 +08002035 idx = start_idx;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002036
2037 /* FIXME: code repetition */
2038 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
2039 agg->frame_count, agg->start_idx, idx);
2040
2041 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
Johannes Berge6a98542008-10-21 12:40:02 +02002042 info->status.rates[0].count = tx_resp->failure_frame + 1;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002043 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
Abhijeet Kolekarc3056062008-11-12 13:14:08 -08002044 info->flags |= iwl_is_tx_success(status) ?
Tomas Winklerf20217d2008-05-29 16:35:10 +08002045 IEEE80211_TX_STAT_ACK : 0;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002046 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002047 /* FIXME: code repetition end */
2048
2049 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
2050 status & 0xff, tx_resp->failure_frame);
Tomas Winklere7d326a2008-06-12 09:47:11 +08002051 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002052
2053 agg->wait_for_ba = 0;
2054 } else {
2055 /* Two or more frames were attempted; expect block-ack */
2056 u64 bitmap = 0;
2057 int start = agg->start_idx;
2058
2059 /* Construct bit-map of pending frames within Tx window */
2060 for (i = 0; i < agg->frame_count; i++) {
2061 u16 sc;
2062 status = le16_to_cpu(frame_status[i].status);
2063 seq = le16_to_cpu(frame_status[i].sequence);
2064 idx = SEQ_TO_INDEX(seq);
2065 txq_id = SEQ_TO_QUEUE(seq);
2066
2067 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
2068 AGG_TX_STATE_ABORT_MSK))
2069 continue;
2070
2071 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
2072 agg->frame_count, txq_id, idx);
2073
2074 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
2075
2076 sc = le16_to_cpu(hdr->seq_ctrl);
2077 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08002078 IWL_ERR(priv,
2079 "BUG_ON idx doesn't match seq control"
2080 " idx=%d, seq_idx=%d, seq=%d\n",
2081 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002082 return -1;
2083 }
2084
2085 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
2086 i, idx, SEQ_TO_SN(sc));
2087
2088 sh = idx - start;
2089 if (sh > 64) {
2090 sh = (start - idx) + 0xff;
2091 bitmap = bitmap << sh;
2092 sh = 0;
2093 start = idx;
2094 } else if (sh < -64)
2095 sh = 0xff - (start - idx);
2096 else if (sh < 0) {
2097 sh = start - idx;
2098 start = idx;
2099 bitmap = bitmap << sh;
2100 sh = 0;
2101 }
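 			/* sh is now this frame's offset from "start" (the
 			 * lowest index seen so far), with wrap-around in the
 			 * 8-bit sequence space folded out by the cases above */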
Emmanuel Grumbach4aa41f12008-07-18 13:53:09 +08002102 bitmap |= 1ULL << sh;
2103 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n",
2104 start, (unsigned long long)bitmap);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002105 }
2106
2107 agg->bitmap = bitmap;
2108 agg->start_idx = start;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002109 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
2110 agg->frame_count, agg->start_idx,
2111 (unsigned long long)agg->bitmap);
2112
2113 if (bitmap)
2114 agg->wait_for_ba = 1;
2115 }
2116 return 0;
2117}
Tomas Winklerf20217d2008-05-29 16:35:10 +08002118
2119/**
2120 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
2121 */
2122static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2123 struct iwl_rx_mem_buffer *rxb)
2124{
2125 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
2126 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2127 int txq_id = SEQ_TO_QUEUE(sequence);
2128 int index = SEQ_TO_INDEX(sequence);
2129 struct iwl_tx_queue *txq = &priv->txq[txq_id];
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002130 struct ieee80211_hdr *hdr;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002131 struct ieee80211_tx_info *info;
2132 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
Tomas Winkler25a65722008-06-12 09:47:07 +08002133 u32 status = le32_to_cpu(tx_resp->u.status);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002134 int tid = MAX_TID_COUNT;
2135 int sta_id;
2136 int freed;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002137 u8 *qc = NULL;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002138
2139 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08002140 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
Tomas Winklerf20217d2008-05-29 16:35:10 +08002141 "is out of range [0-%d] %d %d\n", txq_id,
2142 index, txq->q.n_bd, txq->q.write_ptr,
2143 txq->q.read_ptr);
2144 return;
2145 }
2146
2147 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
2148 memset(&info->status, 0, sizeof(info->status));
2149
Tomas Winklerf20217d2008-05-29 16:35:10 +08002150 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002151 if (ieee80211_is_data_qos(hdr->frame_control)) {
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002152 qc = ieee80211_get_qos_ctl(hdr);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002153 tid = qc[0] & 0xf;
2154 }
2155
2156 sta_id = iwl_get_ra_sta_id(priv, hdr);
2157 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08002158 IWL_ERR(priv, "Station not known\n");
Tomas Winklerf20217d2008-05-29 16:35:10 +08002159 return;
2160 }
2161
2162 if (txq->sched_retry) {
2163 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2164 struct iwl_ht_agg *agg = NULL;
2165
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002166 WARN_ON(!qc);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002167
2168 agg = &priv->stations[sta_id].tid[tid].agg;
2169
Tomas Winkler25a65722008-06-12 09:47:07 +08002170 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002171
Ron Rindjunsky32354272008-07-01 10:44:51 +03002172 /* check if BAR is needed */
2173 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
2174 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002175
2176 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
Tomas Winklerf20217d2008-05-29 16:35:10 +08002177 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2178 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
2179 "%d index %d\n", scd_ssn , index);
Tomas Winkler17b88922008-05-29 16:35:12 +08002180 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002181 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2182
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002183 if (priv->mac80211_registered &&
2184 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
2185 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
Tomas Winklerf20217d2008-05-29 16:35:10 +08002186 if (agg->state == IWL_AGG_OFF)
2187 ieee80211_wake_queue(priv->hw, txq_id);
2188 else
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002189 ieee80211_wake_queue(priv->hw,
2190 txq->swq_id);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002191 }
Tomas Winklerf20217d2008-05-29 16:35:10 +08002192 }
2193 } else {
Johannes Berge6a98542008-10-21 12:40:02 +02002194 info->status.rates[0].count = tx_resp->failure_frame + 1;
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002195 info->flags |= iwl_is_tx_success(status) ?
2196 IEEE80211_TX_STAT_ACK : 0;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002197 iwl_hwrate_to_tx_control(priv,
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002198 le32_to_cpu(tx_resp->rate_n_flags),
2199 info);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002200
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002201 IWL_DEBUG_TX_REPLY("TXQ %d status %s (0x%08x) "
2202 "rate_n_flags 0x%x retries %d\n",
2203 txq_id,
2204 iwl_get_tx_fail_reason(status), status,
2205 le32_to_cpu(tx_resp->rate_n_flags),
2206 tx_resp->failure_frame);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002207
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002208 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
Tomas Winklered7fafe2008-10-23 23:48:50 -07002209 if (qc && likely(sta_id != IWL_INVALID_STATION))
Tomas Winklerf20217d2008-05-29 16:35:10 +08002210 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002211
2212 if (priv->mac80211_registered &&
2213 (iwl_queue_space(&txq->q) > txq->q.low_mark))
Tomas Winklerf20217d2008-05-29 16:35:10 +08002214 ieee80211_wake_queue(priv->hw, txq_id);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002215 }
Tomas Winklerf20217d2008-05-29 16:35:10 +08002216
Tomas Winklered7fafe2008-10-23 23:48:50 -07002217 if (qc && likely(sta_id != IWL_INVALID_STATION))
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002218 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2219
Tomas Winklerf20217d2008-05-29 16:35:10 +08002220 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
Winkler, Tomas15b16872008-12-19 10:37:33 +08002221 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
Tomas Winklerf20217d2008-05-29 16:35:10 +08002222}
2223
Tomas Winklercaab8f12008-08-04 16:00:42 +08002224static int iwl4965_calc_rssi(struct iwl_priv *priv,
2225 struct iwl_rx_phy_res *rx_resp)
2226{
2227 /* data from PHY/DSP regarding signal strength, etc.,
2228 * contents are always there, not configurable by host. */
2229 struct iwl4965_rx_non_cfg_phy *ncphy =
2230 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
2231 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
2232 >> IWL49_AGC_DB_POS;
2233
2234 u32 valid_antennae =
2235 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
2236 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
2237 u8 max_rssi = 0;
2238 u32 i;
2239
2240 /* Find max rssi among 3 possible receivers.
2241 * These values are measured by the digital signal processor (DSP).
2242 * They should stay fairly constant even as the signal strength varies,
2243 * if the radio's automatic gain control (AGC) is working right.
2244 * AGC value (see below) will provide the "interesting" info. */
2245 for (i = 0; i < 3; i++)
2246 if (valid_antennae & (1 << i))
2247 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2248
2249 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2250 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2251 max_rssi, agc);
2252
2253 /* dBm = max_rssi dB - agc dB - constant.
2254 * Higher AGC (higher radio gain) means lower signal. */
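 	/* e.g. (illustrative, assuming the nominal offset of 44):
 	 * max_rssi 60 dB and agc 70 dB give 60 - 70 - 44 = -54 dBm */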
Samuel Ortiz250bdd22008-12-19 10:37:11 +08002255 return max_rssi - agc - IWL49_RSSI_OFFSET;
Tomas Winklercaab8f12008-08-04 16:00:42 +08002256}
2257
Tomas Winklerf20217d2008-05-29 16:35:10 +08002258
Zhu Yib481de92007-09-25 17:54:57 -07002259/* Set up 4965-specific Rx frame reply handlers */
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002260static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002261{
2262 /* Legacy Rx frames */
Emmanuel Grumbach1781a072008-06-30 17:23:09 +08002263 priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
Ron Rindjunsky37a44212008-05-29 16:35:18 +08002264 /* Tx response */
Tomas Winklerf20217d2008-05-29 16:35:10 +08002265 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
Zhu Yib481de92007-09-25 17:54:57 -07002266}
2267
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002268static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002269{
2270 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
Zhu Yib481de92007-09-25 17:54:57 -07002271}
2272
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002273static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002274{
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002275 cancel_work_sync(&priv->txpower_work);
Zhu Yib481de92007-09-25 17:54:57 -07002276}
2277
Tomas Winkler3c424c22008-04-15 16:01:42 -07002278
2279static struct iwl_hcmd_ops iwl4965_hcmd = {
Tomas Winkler7e8c5192008-04-15 16:01:43 -07002280 .rxon_assoc = iwl4965_send_rxon_assoc,
Tomas Winkler3c424c22008-04-15 16:01:42 -07002281};
2282
Tomas Winkler857485c2008-03-21 13:53:44 -07002283static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08002284 .get_hcmd_size = iwl4965_get_hcmd_size,
Tomas Winkler133636d2008-05-05 10:22:34 +08002285 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07002286 .chain_noise_reset = iwl4965_chain_noise_reset,
2287 .gain_computation = iwl4965_gain_computation,
Emmanuel Grumbacha326a5d2008-07-11 11:53:31 +08002288 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
Tomas Winklercaab8f12008-08-04 16:00:42 +08002289 .calc_rssi = iwl4965_calc_rssi,
Tomas Winkler857485c2008-03-21 13:53:44 -07002290};
2291
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002292static struct iwl_lib_ops iwl4965_lib = {
Tomas Winkler5425e492008-04-15 16:01:38 -07002293 .set_hw_params = iwl4965_hw_set_hw_params,
Tomas Winklere2a722e2008-04-14 21:16:10 -07002294 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
Tomas Winklerda1bc452008-05-29 16:35:00 +08002295 .txq_set_sched = iwl4965_txq_set_sched,
Tomas Winkler30e553e2008-05-29 16:35:16 +08002296 .txq_agg_enable = iwl4965_txq_agg_enable,
2297 .txq_agg_disable = iwl4965_txq_agg_disable,
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002298 .rx_handler_setup = iwl4965_rx_handler_setup,
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002299 .setup_deferred_work = iwl4965_setup_deferred_work,
2300 .cancel_deferred_work = iwl4965_cancel_deferred_work,
Tomas Winkler57aab752008-04-14 21:16:03 -07002301 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2302 .alive_notify = iwl4965_alive_notify,
Emmanuel Grumbachf3ccc082008-05-05 10:22:45 +08002303 .init_alive_start = iwl4965_init_alive_start,
Tomas Winkler57aab752008-04-14 21:16:03 -07002304 .load_ucode = iwl4965_load_bsm,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07002305 .apm_ops = {
Tomas Winkler91238712008-04-23 17:14:53 -07002306 .init = iwl4965_apm_init,
Tomas Winkler7f066102008-05-29 16:34:57 +08002307 .reset = iwl4965_apm_reset,
Tomas Winklerf118a912008-05-29 16:34:58 +08002308 .stop = iwl4965_apm_stop,
Tomas Winkler694cc562008-04-24 11:55:22 -07002309 .config = iwl4965_nic_config,
Emmanuel Grumbach5b9f8cd2008-10-29 14:05:46 -07002310 .set_pwr_src = iwl_set_pwr_src,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07002311 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002312 .eeprom_ops = {
Tomas Winkler073d3f52008-04-21 15:41:52 -07002313 .regulatory_bands = {
2314 EEPROM_REGULATORY_BAND_1_CHANNELS,
2315 EEPROM_REGULATORY_BAND_2_CHANNELS,
2316 EEPROM_REGULATORY_BAND_3_CHANNELS,
2317 EEPROM_REGULATORY_BAND_4_CHANNELS,
2318 EEPROM_REGULATORY_BAND_5_CHANNELS,
2319 EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
2320 EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
2321 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002322 .verify_signature = iwlcore_eeprom_verify_signature,
2323 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
2324 .release_semaphore = iwlcore_eeprom_release_semaphore,
Tomas Winkler0ef2ca62008-10-23 23:48:51 -07002325 .calib_version = iwl4965_eeprom_calib_version,
Tomas Winkler073d3f52008-04-21 15:41:52 -07002326 .query_addr = iwlcore_eeprom_query_addr,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002327 },
Tomas Winkler630fe9b2008-06-12 09:47:08 +08002328 .send_tx_power = iwl4965_send_tx_power,
Emmanuel Grumbach5b9f8cd2008-10-29 14:05:46 -07002329 .update_chain_flags = iwl_update_chain_flags,
Emmanuel Grumbach8f91aec2008-06-30 17:23:07 +08002330 .temperature = iwl4965_temperature_calib,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002331};
2332
2333static struct iwl_ops iwl4965_ops = {
2334 .lib = &iwl4965_lib,
Tomas Winkler3c424c22008-04-15 16:01:42 -07002335 .hcmd = &iwl4965_hcmd,
Tomas Winkler857485c2008-03-21 13:53:44 -07002336 .utils = &iwl4965_hcmd_utils,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002337};
2338
Ron Rindjunskyfed90172008-04-15 16:01:41 -07002339struct iwl_cfg iwl4965_agn_cfg = {
Tomas Winkler82b9a122008-03-04 18:09:30 -08002340 .name = "4965AGN",
Reinette Chatrea0987a82008-12-02 12:14:06 -08002341 .fw_name_pre = IWL4965_FW_PRE,
2342 .ucode_api_max = IWL4965_UCODE_API_MAX,
2343 .ucode_api_min = IWL4965_UCODE_API_MIN,
Tomas Winkler82b9a122008-03-04 18:09:30 -08002344 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
Tomas Winkler073d3f52008-04-21 15:41:52 -07002345 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
Tomas Winkler0ef2ca62008-10-23 23:48:51 -07002346 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2347 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002348 .ops = &iwl4965_ops,
Assaf Krauss1ea87392008-03-18 14:57:50 -07002349 .mod_params = &iwl4965_mod_params,
Tomas Winkler82b9a122008-03-04 18:09:30 -08002350};
2351
Tomas Winklerd16dc482008-07-11 11:53:38 +08002352/* Module firmware */
Reinette Chatrea0987a82008-12-02 12:14:06 -08002353MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
Tomas Winklerd16dc482008-07-11 11:53:38 +08002354
Assaf Krauss1ea87392008-03-18 14:57:50 -07002355module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
2356MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2357module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
2358MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
Emmanuel Grumbachfcc76c62008-04-15 16:01:47 -07002359module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
Niels de Vos61a2d072008-07-31 00:07:23 -07002360MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");
Wu, Fengguang95aa1942008-12-17 16:52:30 +08002361module_param_named(debug, iwl4965_mod_params.debug, uint, 0444);
Assaf Krauss1ea87392008-03-18 14:57:50 -07002362MODULE_PARM_DESC(debug, "debug output mask");
2363module_param_named(
2364 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
2365MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
2366
2367module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
2368MODULE_PARM_DESC(queues_num, "number of hw queues.");
Ron Rindjunsky49779292008-06-30 17:23:21 +08002369/* 11n */
2370module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444);
2371MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
Assaf Krauss1ea87392008-03-18 14:57:50 -07002372module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
2373MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
Ron Rindjunsky49779292008-06-30 17:23:21 +08002374
Ester Kummer3a1081e2008-05-06 11:05:14 +08002375module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
2376MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");