/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-calib.h"
#include "iwl-sta.h"

static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(const struct iwl_priv *priv);

/* Change firmware file name, using "-" and incrementing number,
 *   *only* when uCode interface or architecture changes so that it
 *   is not compatible with earlier drivers.
 * This number will also appear in << 8 position of 1st dword of uCode file */
#define IWL4965_UCODE_API "-2"


/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
	.num_of_queues = IWL49_NUM_QUEUES,
	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
	.enable_qos = 1,
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
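/*
 * Note that these are only the built-in defaults; the module_param() hooks
 * that expose them for override at load time (e.g. something like
 * "modprobe iwl4965 amsdu_size_8K=0", assuming that is the registered
 * parameter name) live outside this file, so the user-visible parameter
 * names may differ from the struct field names above.
 */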

/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}

/**
 * iwl4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL.  When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers.  Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image.  This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program.  This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO("Begin load bsm\n");

	priv->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE:  iwl_init_alive_start() will replace these values,
	 *        after the "initialize" uCode has run, to point to
	 *        runtime/protocol instructions and backup data cache.
	 */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));

	ret = iwl4965_verify_bsm(priv);
	if (ret) {
		iwl_release_nic_access(priv);
		return ret;
	}

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
	iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 *   to prepare to load "initialize" uCode */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish */
	for (i = 0; i < 100; i++) {
		done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERROR("BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 *   (e.g. when powering back up after power-save shutdown) */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	iwl_release_nic_access(priv);

	return 0;
}

/**
 * iwl4965_set_ucode_ptrs - Set uCode address location
 *
 * Tell initialization uCode where to find runtime uCode.
 *
 * BSM registers initially contain pointers to initialization uCode.
 * We need to replace them to load runtime uCode inst and data,
 * and to save runtime data when powering down.
 */
static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
	dma_addr_t pinst;
	dma_addr_t pdata;
	unsigned long flags;
	int ret = 0;

	/* bits 35:4 for 4965 */
	pinst = priv->ucode_code.p_addr >> 4;
	pdata = priv->ucode_data_backup.p_addr >> 4;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Tell bootstrap uCode where to find image to load */
	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
		       priv->ucode_data.len);

	/* Inst bytecount must be last to set up, bit 31 signals uCode
	 *   that all new ptr/size info is in place */
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
		       priv->ucode_code.len | BSM_DRAM_INST_LOAD);
	iwl_release_nic_access(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");

	return ret;
}

/**
 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
 *
 * Called after REPLY_ALIVE notification received from "initialize" uCode.
 *
 * The 4965 "initialize" ALIVE reply contains calibration data for:
 *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
 *   (3945 does not contain this data).
 *
 * Tell "initialize" uCode to go ahead and load the runtime uCode.
 */
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO("Initialize Alive failed.\n");
		goto restart;
	}

	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded.  */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	priv->temperature = iwl4965_hw_get_temperature(priv);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	IWL_DEBUG_INFO("Initialization Alive received.\n");
	if (iwl4965_set_ucode_ptrs(priv)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	queue_work(priv->workqueue, &priv->restart);
}

static int is_fat_channel(__le32 rxon_flags)
{
	return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
		(rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
}

/*
 * EEPROM handlers
 */

static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
{
	u16 eeprom_ver;
	u16 calib_ver;

	eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);

	calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);

	if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
	    calib_ver < EEPROM_4965_TX_POWER_VERSION)
		goto err;

	return 0;
err:
	IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
		  eeprom_ver, EEPROM_4965_EEPROM_VERSION,
		  calib_ver, EEPROM_4965_TX_POWER_VERSION);
	return -EINVAL;

}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called under priv->lock and mac access
 */
static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
}

static int iwl4965_apm_init(struct iwl_priv *priv)
{
	int ret = 0;

	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* disable L0S without affecting L1 (don't wait for ICH L0S bug W/A) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO("Failed to init the card\n");
		goto out;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
		       APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);
out:
	return ret;
}


static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u32 val;
	u16 radio_cfg;
	u8 val_link;

	spin_lock_irqsave(&priv->lock, flags);

	if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* L1 is enabled by BIOS */
	if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
		/* L0S disabled, L1A enabled */
		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		/* L0S enabled, L1A disabled */
		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int iwl4965_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		goto out;

out:
	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO("stop master\n");

	return ret;
}

static void iwl4965_apm_stop(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl4965_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static int iwl4965_apm_reset(struct iwl_priv *priv)
{
	int ret = 0;
	unsigned long flags;

	iwl4965_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* FIXME: put here L1A -L0S w/a */

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	ret = iwl_poll_bit(priv, CSR_RESET,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);

	if (ret)
		goto out;

	udelay(10);

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;
	/* Enable DMA and BSM Clock */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
		       APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(10);

	/* disable L1A */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);

	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

out:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}

/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 *  ... once chain noise is calibrated the first time, it's good forever. */
static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
		struct iwl4965_calibration_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				     sizeof(cmd), &cmd))
			IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
	}
}

static void iwl4965_gain_computation(struct iwl_priv *priv,
				     u32 *average_noise,
				     u16 min_average_noise_antenna_i,
				     u32 min_average_noise)
{
	int i, ret;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	data->delta_gain_code[min_average_noise_antenna_i] = 0;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		s32 delta_g = 0;

		if (!(data->disconn_array[i]) &&
		    (data->delta_gain_code[i] ==
			     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
			delta_g = average_noise[i] - min_average_noise;
			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
			data->delta_gain_code[i] =
				min(data->delta_gain_code[i],
				    (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

			data->delta_gain_code[i] =
				(data->delta_gain_code[i] | (1 << 2));
		} else {
			data->delta_gain_code[i] = 0;
		}
	}
	IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
			data->delta_gain_code[0],
			data->delta_gain_code[1],
			data->delta_gain_code[2]);

	/* Differential gain gets sent to uCode only once */
	if (!data->radio_write) {
		struct iwl4965_calibration_cmd cmd;
		data->radio_write = 1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = data->delta_gain_code[0];
		cmd.diff_gain_b = data->delta_gain_code[1];
		cmd.diff_gain_c = data->delta_gain_code[2];
		ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_DEBUG_CALIB("fail sending cmd "
					"REPLY_PHY_CALIBRATION_CMD \n");

		/* TODO: we might want to recalculate
		 * rx_chain in the rxon cmd */

		/* Mark so we run this algo only once! */
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
	data->chain_noise_a = 0;
	data->chain_noise_b = 0;
	data->chain_noise_c = 0;
	data->chain_signal_a = 0;
	data->chain_signal_b = 0;
	data->chain_signal_c = 0;
	data->beacon_count = 0;
}

static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
				    __le32 *tx_flags)
{
	if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
	} else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
	}
}

static void iwl4965_bg_txpower_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     txpower_work);

	/* If a scan happened to start before we got here
	 * then just return; the statistics notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		return;

	mutex_lock(&priv->mutex);

	/* Regardless of whether we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	iwl4965_send_tx_power(priv);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	priv->last_temperature = priv->temperature;

	mutex_unlock(&priv->mutex);
}

/*
 * Acquire priv->lock before calling this function !
 */
static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}

/**
 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
 * @scd_retry: (1) Indicates queue will be used in aggregation mode
 *
 * NOTE:  Acquire priv->lock before calling this function !
 */
static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		       IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}

static const u16 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_HCCA_1,
	IWL_TX_FIFO_HCCA_2
};
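/*
 * The table above is indexed by Tx queue number: iwl4965_alive_notify()
 * below attaches queue i to FIFO default_queue_to_tx_fifo[i], so entry 4
 * feeds the command FIFO (presumably lining up with IWL_CMD_QUEUE_NUM).
 */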
709
Emmanuel Grumbachbe1f3ab62008-06-12 09:47:18 +0800710static int iwl4965_alive_notify(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700711{
712 u32 a;
713 int i = 0;
714 unsigned long flags;
Tomas Winkler857485c2008-03-21 13:53:44 -0700715 int ret;
Zhu Yib481de92007-09-25 17:54:57 -0700716
717 spin_lock_irqsave(&priv->lock, flags);
718
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700719 ret = iwl_grab_nic_access(priv);
Tomas Winkler857485c2008-03-21 13:53:44 -0700720 if (ret) {
Zhu Yib481de92007-09-25 17:54:57 -0700721 spin_unlock_irqrestore(&priv->lock, flags);
Tomas Winkler857485c2008-03-21 13:53:44 -0700722 return ret;
Zhu Yib481de92007-09-25 17:54:57 -0700723 }
724
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800725 /* Clear 4965's internal Tx Scheduler data base */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700726 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700727 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
728 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700729 iwl_write_targ_mem(priv, a, 0);
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700730 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700731 iwl_write_targ_mem(priv, a, 0);
Tomas Winkler5425e492008-04-15 16:01:38 -0700732 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700733 iwl_write_targ_mem(priv, a, 0);
Zhu Yib481de92007-09-25 17:54:57 -0700734
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800735 /* Tel 4965 where to find Tx byte count tables */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700736 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
Tomas Winkler059ff822008-04-14 21:16:14 -0700737 (priv->shared_phys +
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800738 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800739
740 /* Disable chain mode for all queues */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700741 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
Zhu Yib481de92007-09-25 17:54:57 -0700742
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800743 /* Initialize each Tx queue (including the command queue) */
Tomas Winkler5425e492008-04-15 16:01:38 -0700744 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800745
746 /* TFD circular buffer read/write indexes */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700747 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700748 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800749
750 /* Max Tx Window size for Scheduler-ACK mode */
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700751 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700752 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
753 (SCD_WIN_SIZE <<
754 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
755 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800756
757 /* Frame limit */
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700758 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700759 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
760 sizeof(u32),
761 (SCD_FRAME_LIMIT <<
762 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
763 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
Zhu Yib481de92007-09-25 17:54:57 -0700764
765 }
Tomas Winkler12a81f62008-04-03 16:05:20 -0700766 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
Tomas Winkler5425e492008-04-15 16:01:38 -0700767 (1 << priv->hw_params.max_txq_num) - 1);
Zhu Yib481de92007-09-25 17:54:57 -0700768
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800769 /* Activate all Tx DMA/FIFO channels */
Tomas Winklerda1bc452008-05-29 16:35:00 +0800770 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
Zhu Yib481de92007-09-25 17:54:57 -0700771
772 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800773
774 /* Map each Tx/cmd queue to its corresponding fifo */
Zhu Yib481de92007-09-25 17:54:57 -0700775 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
776 int ac = default_queue_to_tx_fifo[i];
Ron Rindjunsky36470742008-05-15 13:54:10 +0800777 iwl_txq_ctx_activate(priv, i);
Zhu Yib481de92007-09-25 17:54:57 -0700778 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
779 }
780
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700781 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700782 spin_unlock_irqrestore(&priv->lock, flags);
783
Tomas Winkler857485c2008-03-21 13:53:44 -0700784 return ret;
Zhu Yib481de92007-09-25 17:54:57 -0700785}
786
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,
};

/**
 * iwl4965_hw_set_hw_params
 *
 * Called when initializing driver
 */
static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
{

	if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
		IWL_ERROR("invalid queues_num, should be between %d and %d\n",
			  IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
		return -EINVAL;
	}

	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
	priv->hw_params.first_ampdu_q = IWL49_FIRST_AMPDU_QUEUE;
	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
	priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
	priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);

	priv->hw_params.tx_chains_num = 2;
	priv->hw_params.rx_chains_num = 2;
	priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
	priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
	priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);

	priv->hw_params.sens = &iwl4965_sensitivity;

	return 0;
}

static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
	s32 sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	*res = 1;
	*res = ((num * 2 + denom) / (denom * 2)) * sign;

	return 1;
}
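/*
 * The helper above rounds num/denom to the nearest integer, half away from
 * zero: iwl4965_math_div_round(7, 2, &res) stores 4 in *res, while
 * iwl4965_math_div_round(-7, 2, &res) stores -4.  The return value is
 * always 1.
 */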

/**
 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
 *
 * Determines power supply voltage compensation for txpower calculations.
 * Returns number of 1/2-dB steps to subtract from gain table index,
 * to compensate for difference between power supply voltage during
 * factory measurements, vs. current power supply voltage.
 *
 * Voltage indication is higher for lower voltage.
 * Lower voltage requires more gain (lower gain table index).
 */
static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
					    s32 current_voltage)
{
	s32 comp = 0;

	if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
	    (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
		return 0;

	iwl4965_math_div_round(current_voltage - eeprom_voltage,
			       TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);

	if (current_voltage > eeprom_voltage)
		comp *= 2;
	if ((comp < -2) || (comp > 2))
		comp = 0;

	return comp;
}

static s32 iwl4965_get_tx_atten_grp(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;

	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;

	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;

	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;

	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;

	IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
	return -1;
}

static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
{
	s32 b = -1;

	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
		if (priv->calib_info->band_info[b].ch_from == 0)
			continue;

		if ((channel >= priv->calib_info->band_info[b].ch_from)
		    && (channel <= priv->calib_info->band_info[b].ch_to))
			break;
	}

	return b;
}

static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	s32 val;

	if (x2 == x1)
		return y1;
	else {
		iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
		return val + y2;
	}
}
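/*
 * Linear interpolation (with rounding) of y at x between the points
 * (x1, y1) and (x2, y2).  For example, interpolating between
 * (x1 = 36, y1 = 10) and (x2 = 44, y2 = 18) at x = 40 gives
 * (44 - 40) * (10 - 18) / (44 - 36) + 18 = -4 + 18 = 14.
 */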

/**
 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
 *
 * Interpolates factory measurements from the two sample channels within a
 * sub-band, to apply to channel of interest.  Interpolation is proportional to
 * differences in channel frequencies, which is proportional to differences
 * in channel number.
 */
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
				    struct iwl_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct iwl_eeprom_calib_measure *m1;
	const struct iwl_eeprom_calib_measure *m2;
	struct iwl_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	s = iwl4965_get_sub_band(priv, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IWL_ERROR("Tx Power can not find channel %d\n", channel);
		return -1;
	}

	ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
			  channel, s, ch_i1, ch_i2);

	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(priv->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(priv->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->actual_pow,
							   ch_i2,
							   m2->actual_pow);
			omeas->gain_idx =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->gain_idx, ch_i2,
							   m2->gain_idx);
			omeas->temperature =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->temperature,
							   ch_i2,
							   m2->temperature);
			omeas->pa_det =
			    (s8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->pa_det, ch_i2,
							   m2->pa_det);

			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
			     m1->actual_pow, m2->actual_pow, omeas->actual_pow);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
			     m1->gain_idx, m2->gain_idx, omeas->gain_idx);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
			     m1->pa_det, m2->pa_det, omeas->pa_det);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
			     m1->temperature, m2->temperature,
			     omeas->temperature);
		}
	}

	return 0;
}

/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
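/*
 * back_off_table[] uses the same 0..32 rate index as the
 * POWER_TABLE_NUM_ENTRIES loop in iwl4965_fill_txpower_tbl() below; the
 * MIMO groups sit at indexes 8-15 and 24-31, which is why that loop can
 * test (i & 0x8) to recognize MIMO rates.
 */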

/* Thermal compensation values for txpower for various frequency ranges ...
 *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;
	s32 degrees_per_05db_a_denom;
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch 34-43 */
	{4, 1},			/* group 1 5.2, ch 44-70 */
	{4, 1},			/* group 2 5.2, ch 71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch all */
};

static s32 get_min_power_index(s32 rate_power_index, u32 band)
{
	if (!band) {
		if ((rate_power_index & 7) <= 4)
			return MIN_TX_GAIN_INDEX_52GHZ_EXT;
	}
	return MIN_TX_GAIN_INDEX;
}
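/*
 * Note: throughout the txpower code "band" is non-zero for 2.4 GHz and zero
 * for 5.2 GHz (see iwl4965_send_tx_power(), which derives it from
 * priv->band == IEEE80211_BAND_2GHZ, and gain_table[] below, whose first
 * table is the 5.2 GHz one), so the extended minimum above applies only to
 * some 5.2 GHz rate entries.
 */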

struct gain_entry {
	u8 dsp;
	u8 radio;
};

static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
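/*
 * Entry 0 of each table is the highest txpower and each later entry steps
 * the output power down (in the half-dB units used throughout this file);
 * iwl4965_fill_txpower_tbl() clamps its computed index to 0..107 before
 * indexing the table.
 */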

static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
				    u8 is_fat, u8 ctrl_chan_high,
				    struct iwl4965_tx_power_db *tx_power_tbl)
{
	u8 saturation_power;
	s32 target_power;
	s32 user_target_power;
	s32 power_limit;
	s32 current_temp;
	s32 reg_limit;
	s32 current_regulatory;
	s32 txatten_grp = CALIB_CH_GROUP_MAX;
	int i;
	int c;
	const struct iwl_channel_info *ch_info = NULL;
	struct iwl_eeprom_calib_ch_info ch_eeprom_info;
	const struct iwl_eeprom_calib_measure *measurement;
	s16 voltage;
	s32 init_voltage;
	s32 voltage_compensation;
	s32 degrees_per_05db_num;
	s32 degrees_per_05db_denom;
	s32 factory_temp;
	s32 temperature_comp[2];
	s32 factory_gain_index[2];
	s32 factory_actual_pwr[2];
	s32 power_index;

	/* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
	 *   are used for indexing into txpower table) */
	user_target_power = 2 * priv->tx_power_user_lmt;

	/* Get current (RXON) channel, band, width */
	IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
			  is_fat);

	ch_info = iwl_get_channel_info(priv, priv->band, channel);

	if (!is_channel_valid(ch_info))
		return -EINVAL;

	/* get txatten group, used to select 1) thermal txpower adjustment
	 *   and 2) mimo txpower balance between Tx chains. */
	txatten_grp = iwl4965_get_tx_atten_grp(channel);
	if (txatten_grp < 0)
		return -EINVAL;

	IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
			  channel, txatten_grp);

	if (is_fat) {
		if (ctrl_chan_high)
			channel -= 2;
		else
			channel += 2;
	}

	/* hardware txpower limits ...
	 * saturation (clipping distortion) txpowers are in half-dBm */
	if (band)
		saturation_power = priv->calib_info->saturation_power24;
	else
		saturation_power = priv->calib_info->saturation_power52;

	if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
	    saturation_power > IWL_TX_POWER_SATURATION_MAX) {
		if (band)
			saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
		else
			saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
	}

	/* regulatory txpower limits ... reg_limit values are in half-dBm,
	 * max_power_avg values are in dBm, convert * 2 */
	if (is_fat)
		reg_limit = ch_info->fat_max_power_avg * 2;
	else
		reg_limit = ch_info->max_power_avg * 2;

	if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
	    (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
		if (band)
			reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
		else
			reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
	}

	/* Interpolate txpower calibration values for this channel,
	 * based on factory calibration tests on spaced channels. */
	iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);

	/* calculate tx gain adjustment based on power supply voltage */
	voltage = priv->calib_info->voltage;
	init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
	voltage_compensation =
	    iwl4965_get_voltage_compensation(voltage, init_voltage);

	IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
			  init_voltage,
			  voltage, voltage_compensation);

	/* get current temperature (Celsius), clamped to the supported range */
	current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
	current_temp = KELVIN_TO_CELSIUS(current_temp);

	/* select thermal txpower adjustment params, based on channel group
	 * (same frequency group used for mimo txatten adjustment) */
	degrees_per_05db_num =
	    tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
	degrees_per_05db_denom =
	    tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;

	/* get per-chain txpower values from factory measurements */
	for (c = 0; c < 2; c++) {
		measurement = &ch_eeprom_info.measurements[c][1];

		/* txgain adjustment (in half-dB steps) based on difference
		 * between factory and current temperature */
		factory_temp = measurement->temperature;
		iwl4965_math_div_round((current_temp - factory_temp) *
				       degrees_per_05db_denom,
				       degrees_per_05db_num,
				       &temperature_comp[c]);

		factory_gain_index[c] = measurement->gain_idx;
		factory_actual_pwr[c] = measurement->actual_pow;

		IWL_DEBUG_TXPOWER("chain = %d\n", c);
		IWL_DEBUG_TXPOWER("fctry tmp %d, "
				  "curr tmp %d, comp %d steps\n",
				  factory_temp, current_temp,
				  temperature_comp[c]);

		IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
				  factory_gain_index[c],
				  factory_actual_pwr[c]);
	}

	/* for each of 33 bit-rates (including 1 for CCK) */
	for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
		u8 is_mimo_rate;
		union iwl4965_tx_power_dual_stream tx_power;

		/* for mimo, reduce each chain's txpower by half
		 * (3dB, 6 steps), so total output power is regulatory
		 * compliant. */
		if (i & 0x8) {
			current_regulatory = reg_limit -
			    IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
			is_mimo_rate = 1;
		} else {
			current_regulatory = reg_limit;
			is_mimo_rate = 0;
		}

		/* find txpower limit, either hardware or regulatory */
		power_limit = saturation_power - back_off_table[i];
		if (power_limit > current_regulatory)
			power_limit = current_regulatory;

		/* reduce user's txpower request if necessary
		 * for this rate on this channel */
		target_power = user_target_power;
		if (target_power > power_limit)
			target_power = power_limit;

		IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
				  i, saturation_power - back_off_table[i],
				  current_regulatory, user_target_power,
				  target_power);

		/* for each of 2 Tx chains (radio transmitters) */
		for (c = 0; c < 2; c++) {
			s32 atten_value;

			if (is_mimo_rate)
				atten_value =
				    (s32)le32_to_cpu(priv->card_alive_init.
						     tx_atten[txatten_grp][c]);
			else
				atten_value = 0;

			/* calculate index; higher index means lower txpower */
			power_index = (u8) (factory_gain_index[c] -
					    (target_power -
					     factory_actual_pwr[c]) -
					    temperature_comp[c] -
					    voltage_compensation +
					    atten_value);

/*			IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
					  power_index); */
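			/*
			 * Illustrative example (made-up numbers): with a
			 * factory gain index of 75, factory power of 30
			 * half-dBm, a target of 28 half-dBm and all other
			 * compensation terms zero, the index becomes
			 * 75 - (28 - 30) = 77; the lower target power raises
			 * the index, which selects a lower-power gain_table
			 * entry.
			 */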

			if (power_index < get_min_power_index(i, band))
				power_index = get_min_power_index(i, band);

			/* adjust 5 GHz index to support negative indexes */
			if (!band)
				power_index += 9;

			/* CCK, rate 32, reduce txpower for CCK */
			if (i == POWER_TABLE_CCK_ENTRY)
				power_index +=
				    IWL_TX_POWER_CCK_COMPENSATION_C_STEP;

			/* stay within the table! */
			if (power_index > 107) {
				IWL_WARNING("txpower index %d > 107\n",
					    power_index);
				power_index = 107;
			}
			if (power_index < 0) {
				IWL_WARNING("txpower index %d < 0\n",
					    power_index);
				power_index = 0;
			}

			/* fill txpower command for this rate/chain */
			tx_power.s.radio_tx_gain[c] =
			    gain_table[band][power_index].radio;
			tx_power.s.dsp_predis_atten[c] =
			    gain_table[band][power_index].dsp;

			IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
					  "gain 0x%02x dsp %d\n",
					  c, atten_value, power_index,
					  tx_power.s.radio_tx_gain[c],
					  tx_power.s.dsp_predis_atten[c]);
		} /* for each chain */

		tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);

	} /* for each rate */

	return 0;
}

1527/**
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001528 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07001529 *
1530 * Uses the active RXON for channel, band, and characteristics (fat, high)
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001531 * The power limit is taken from priv->tx_power_user_lmt.
Zhu Yib481de92007-09-25 17:54:57 -07001532 */
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001533static int iwl4965_send_tx_power(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001534{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001535 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07001536 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001537 u8 band = 0;
1538 u8 is_fat = 0;
1539 u8 ctrl_chan_high = 0;
1540
1541 if (test_bit(STATUS_SCANNING, &priv->status)) {
1542 /* If this gets hit a lot, switch it to a BUG() and catch
1543 * the stack trace to find out who is calling this during
1544 * a scan. */
1545 IWL_WARNING("TX Power requested while scanning!\n");
1546 return -EAGAIN;
1547 }
1548
Johannes Berg8318d782008-01-24 19:38:38 +01001549 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001550
1551 is_fat = is_fat_channel(priv->active_rxon.flags);
1552
1553 if (is_fat &&
1554 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1555 ctrl_chan_high = 1;
1556
1557 cmd.band = band;
1558 cmd.channel = priv->active_rxon.channel;
1559
Tomas Winkler857485c2008-03-21 13:53:44 -07001560 ret = iwl4965_fill_txpower_tbl(priv, band,
Zhu Yib481de92007-09-25 17:54:57 -07001561 le16_to_cpu(priv->active_rxon.channel),
1562 is_fat, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07001563 if (ret)
1564 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07001565
Tomas Winkler857485c2008-03-21 13:53:44 -07001566 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1567
1568out:
1569 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001570}
1571
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001572static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1573{
1574 int ret = 0;
1575 struct iwl4965_rxon_assoc_cmd rxon_assoc;
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08001576 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1577 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001578
1579 if ((rxon1->flags == rxon2->flags) &&
1580 (rxon1->filter_flags == rxon2->filter_flags) &&
1581 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1582 (rxon1->ofdm_ht_single_stream_basic_rates ==
1583 rxon2->ofdm_ht_single_stream_basic_rates) &&
1584 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1585 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1586 (rxon1->rx_chain == rxon2->rx_chain) &&
1587 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1588 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1589 return 0;
1590 }
1591
1592 rxon_assoc.flags = priv->staging_rxon.flags;
1593 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1594 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1595 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1596 rxon_assoc.reserved = 0;
1597 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1598 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1599 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1600 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1601 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1602
1603 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1604 sizeof(rxon_assoc), &rxon_assoc, NULL);
1605 if (ret)
1606 return ret;
1607
1608 return ret;
1609}
1610
1611
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001612int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07001613{
1614 int rc;
1615 u8 band = 0;
1616 u8 is_fat = 0;
1617 u8 ctrl_chan_high = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001618 struct iwl4965_channel_switch_cmd cmd = { 0 };
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001619 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07001620
Johannes Berg8318d782008-01-24 19:38:38 +01001621 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001622
Assaf Krauss8622e702008-03-21 13:53:43 -07001623 ch_info = iwl_get_channel_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07001624
1625 is_fat = is_fat_channel(priv->staging_rxon.flags);
1626
1627 if (is_fat &&
1628 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1629 ctrl_chan_high = 1;
1630
1631 cmd.band = band;
1632 cmd.expect_beacon = 0;
1633 cmd.channel = cpu_to_le16(channel);
1634 cmd.rxon_flags = priv->active_rxon.flags;
1635 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
1636 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
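	/* On radar-flagged channels -- and on channels unknown to the driver --
	 * tell the uCode to wait for a beacon before using the new channel,
	 * presumably to satisfy the passive/DFS listen-before-transmit rule. */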
1637 if (ch_info)
1638 cmd.expect_beacon = is_channel_radar(ch_info);
1639 else
1640 cmd.expect_beacon = 1;
1641
1642 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
1643 ctrl_chan_high, &cmd.tx_power);
1644 if (rc) {
1645 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
1646 return rc;
1647 }
1648
Tomas Winkler857485c2008-03-21 13:53:44 -07001649 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07001650 return rc;
1651}
1652
Ron Rindjunskyd67f5482008-05-05 10:22:49 +08001653static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001654{
Tomas Winkler059ff822008-04-14 21:16:14 -07001655 struct iwl4965_shared *s = priv->shared_virt;
1656 return le32_to_cpu(s->rb_closed) & 0xFFF;
Zhu Yib481de92007-09-25 17:54:57 -07001657}
1658
Ron Rindjunsky399f4902008-04-23 17:14:56 -07001659static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
1660{
1661 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
1662 sizeof(struct iwl4965_shared),
1663 &priv->shared_phys);
1664 if (!priv->shared_virt)
1665 return -ENOMEM;
1666
1667 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
1668
Ron Rindjunskyd67f5482008-05-05 10:22:49 +08001669 priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
1670
Ron Rindjunsky399f4902008-04-23 17:14:56 -07001671 return 0;
1672}
1673
1674static void iwl4965_free_shared_mem(struct iwl_priv *priv)
1675{
1676 if (priv->shared_virt)
1677 pci_free_consistent(priv->pci_dev,
1678 sizeof(struct iwl4965_shared),
1679 priv->shared_virt,
1680 priv->shared_phys);
1681}
1682
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001683/**
Tomas Winklere2a722e2008-04-14 21:16:10 -07001684 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001685 */
Tomas Winklere2a722e2008-04-14 21:16:10 -07001686static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
Ron Rindjunsky16466902008-05-05 10:22:50 +08001687 struct iwl_tx_queue *txq,
Tomas Winklere2a722e2008-04-14 21:16:10 -07001688 u16 byte_cnt)
Zhu Yib481de92007-09-25 17:54:57 -07001689{
1690 int len;
1691 int txq_id = txq->q.id;
Tomas Winkler059ff822008-04-14 21:16:14 -07001692 struct iwl4965_shared *shared_data = priv->shared_virt;
Zhu Yib481de92007-09-25 17:54:57 -07001693
Zhu Yib481de92007-09-25 17:54:57 -07001694 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1695
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001696 /* Set up byte count within first 256 entries */
Zhu Yib481de92007-09-25 17:54:57 -07001697 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
Tomas Winklerfc4b6852007-10-25 17:15:24 +08001698 tfd_offset[txq->q.write_ptr], byte_cnt, len);
Zhu Yib481de92007-09-25 17:54:57 -07001699
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001700 /* If within first 64 entries, duplicate at end */
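	/* (the byte-count table presumably carries IWL49_MAX_WIN_SIZE extra
	 * slots past IWL49_QUEUE_SIZE so the scheduler can read a full window
	 * across the wrap point without splitting the access) */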
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001701 if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
Zhu Yib481de92007-09-25 17:54:57 -07001702 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001703 tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
Zhu Yib481de92007-09-25 17:54:57 -07001704 byte_cnt, len);
Zhu Yib481de92007-09-25 17:54:57 -07001705}
1706
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001707/**
Zhu Yib481de92007-09-25 17:54:57 -07001708 * sign_extend - Sign extend a value using specified bit as sign-bit
1709 *
1710 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
1711 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
1712 *
1713 * @param oper value to sign extend
1714 * @param index 0 based bit index (0<=index<32) to sign bit
1715 */
1716static s32 sign_extend(u32 oper, int index)
1717{
1718 u8 shift = 31 - index;
1719
1720 return (s32)(oper << shift) >> shift;
1721}
1722
1723/**
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001724 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
Zhu Yib481de92007-09-25 17:54:57 -07001725 * @priv: temperature reading is taken from priv->statistics, as reported by the uCode
1726 *
1727 * A return of <0 indicates bogus data in the statistics
1728 */
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001729static int iwl4965_hw_get_temperature(const struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001730{
1731 s32 temperature;
1732 s32 vt;
1733 s32 R1, R2, R3;
1734 u32 R4;
1735
1736 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1737 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
1738 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
1739 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1740 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1741 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1742 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1743 } else {
1744 IWL_DEBUG_TEMP("Running temperature calibration\n");
1745 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1746 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1747 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1748 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1749 }
1750
1751 /*
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001752 * Temperature is only 23 bits, so sign extend out to 32.
Zhu Yib481de92007-09-25 17:54:57 -07001753 *
1754 * NOTE If we haven't received a statistics notification yet
1755 * with an updated temperature, use R4 provided to us in the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001756 * "initialize" ALIVE response.
1757 */
Zhu Yib481de92007-09-25 17:54:57 -07001758 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1759 vt = sign_extend(R4, 23);
1760 else
1761 vt = sign_extend(
1762 le32_to_cpu(priv->statistics.general.temperature), 23);
1763
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001764	IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d vt: %d\n", R1, R2, R3, vt);
Zhu Yib481de92007-09-25 17:54:57 -07001765
1766 if (R3 == R1) {
1767 IWL_ERROR("Calibration conflict R1 == R3\n");
1768 return -1;
1769 }
1770
1771 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1772 * Add offset to center the adjustment around 0 degrees Centigrade. */
1773 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1774 temperature /= (R3 - R1);
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001775 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
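	/* Equivalently, with integer (truncating) arithmetic throughout:
	 *   T[K] = (TEMPERATURE_CALIB_A_VAL * (vt - R2) / (R3 - R1)) * 97 / 100
	 *          + TEMPERATURE_CALIB_KELVIN_OFFSET */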
Zhu Yib481de92007-09-25 17:54:57 -07001776
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001777 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n",
1778 temperature, KELVIN_TO_CELSIUS(temperature));
Zhu Yib481de92007-09-25 17:54:57 -07001779
1780 return temperature;
1781}
1782
1783/* Adjust Txpower only if temperature variance is greater than threshold. */
1784#define IWL_TEMPERATURE_THRESHOLD 3
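/* (threshold is in degrees; Kelvin and Celsius steps are the same size,
 * so it applies unchanged to either scale) */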
1785
1786/**
1787 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1788 *
1789 * If the temperature has changed sufficiently, then a recalibration
1790 * is needed.
1791 *
1792 * Assumes caller will replace priv->last_temperature once calibration
1793 * executed.
1794 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001795static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001796{
1797 int temp_diff;
1798
1799 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
1800 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
1801 return 0;
1802 }
1803
1804 temp_diff = priv->temperature - priv->last_temperature;
1805
1806 /* get absolute value */
1807 if (temp_diff < 0) {
1808 		IWL_DEBUG_POWER("Getting cooler, delta %d\n", temp_diff);
1809 		temp_diff = -temp_diff;
1810 	} else if (temp_diff == 0)
1811 		IWL_DEBUG_POWER("Same temp\n");
1812 	else
1813 		IWL_DEBUG_POWER("Getting warmer, delta %d\n", temp_diff);
1814
1815 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1816 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
1817 return 0;
1818 }
1819
1820 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
1821
1822 return 1;
1823}
1824
Zhu Yi52256402008-06-30 17:23:31 +08001825static void iwl4965_temperature_calib(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001826{
Zhu Yib481de92007-09-25 17:54:57 -07001827 s32 temp;
Zhu Yib481de92007-09-25 17:54:57 -07001828
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001829 temp = iwl4965_hw_get_temperature(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001830 if (temp < 0)
1831 return;
1832
1833 if (priv->temperature != temp) {
1834 if (priv->temperature)
1835 IWL_DEBUG_TEMP("Temperature changed "
1836 "from %dC to %dC\n",
1837 KELVIN_TO_CELSIUS(priv->temperature),
1838 KELVIN_TO_CELSIUS(temp));
1839 else
1840 IWL_DEBUG_TEMP("Temperature "
1841 "initialized to %dC\n",
1842 KELVIN_TO_CELSIUS(temp));
1843 }
1844
1845 priv->temperature = temp;
1846 set_bit(STATUS_TEMPERATURE, &priv->status);
1847
Emmanuel Grumbach203566f2008-06-12 09:46:54 +08001848 if (!priv->disable_tx_power_cal &&
1849 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1850 iwl4965_is_temp_calib_needed(priv))
Zhu Yib481de92007-09-25 17:54:57 -07001851 queue_work(priv->workqueue, &priv->txpower_work);
1852}
1853
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001854/**
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001855 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
1856 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001857static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001858 u16 txq_id)
1859{
1860 /* Simply stop the queue, but don't change any configuration;
1861 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001862 iwl_write_prph(priv,
Tomas Winkler12a81f62008-04-03 16:05:20 -07001863 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001864 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1865 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001866}
1867
1868/**
Ron Rindjunsky7f3e4bb2008-06-12 09:46:55 +08001869 * txq_id must be at least IWL49_FIRST_AMPDU_QUEUE
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08001870 * priv->lock must be held by the caller
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001871 */
Tomas Winkler30e553e2008-05-29 16:35:16 +08001872static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1873 u16 ssn_idx, u8 tx_fifo)
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001874{
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08001875 int ret = 0;
1876
Tomas Winkler9f17b312008-07-11 11:53:35 +08001877 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1878 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
1879 IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
1880 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1881 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001882 return -EINVAL;
1883 }
1884
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001885 ret = iwl_grab_nic_access(priv);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08001886 if (ret)
1887 return ret;
1888
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001889 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1890
Tomas Winkler12a81f62008-04-03 16:05:20 -07001891 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001892
1893 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1894 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1895 	/* assumes that ssn_idx is valid (!= 0xFFF) */
1896 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1897
Tomas Winkler12a81f62008-04-03 16:05:20 -07001898 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Ron Rindjunsky36470742008-05-15 13:54:10 +08001899 iwl_txq_ctx_deactivate(priv, txq_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001900 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
1901
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001902 iwl_release_nic_access(priv);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08001903
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001904 return 0;
1905}
1906
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001907/**
1908 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
1909 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001910static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07001911 u16 txq_id)
1912{
1913 u32 tbl_dw_addr;
1914 u32 tbl_dw;
1915 u16 scd_q2ratid;
1916
Tomas Winkler30e553e2008-05-29 16:35:16 +08001917 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
Zhu Yib481de92007-09-25 17:54:57 -07001918
1919 tbl_dw_addr = priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001920 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
Zhu Yib481de92007-09-25 17:54:57 -07001921
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001922 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07001923
1924 if (txq_id & 0x1)
1925 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1926 else
1927 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1928
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001929 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07001930
1931 return 0;
1932}
1933
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001934
Zhu Yib481de92007-09-25 17:54:57 -07001935/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001936 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
1937 *
Ron Rindjunsky7f3e4bb2008-06-12 09:46:55 +08001938 * NOTE: txq_id must be at least IWL49_FIRST_AMPDU_QUEUE,
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001939 * i.e. it must be one of the higher queues used for aggregation
Zhu Yib481de92007-09-25 17:54:57 -07001940 */
Tomas Winkler30e553e2008-05-29 16:35:16 +08001941static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1942 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
Zhu Yib481de92007-09-25 17:54:57 -07001943{
1944 unsigned long flags;
Tomas Winkler30e553e2008-05-29 16:35:16 +08001945 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001946 u16 ra_tid;
1947
Tomas Winkler9f17b312008-07-11 11:53:35 +08001948 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1949 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
1950 IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
1951 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1952 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
1953 return -EINVAL;
1954 }
Zhu Yib481de92007-09-25 17:54:57 -07001955
1956 ra_tid = BUILD_RAxTID(sta_id, tid);
1957
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001958 /* Modify device's station table to Tx this TID */
Tomas Winkler5083e562008-05-29 16:35:15 +08001959 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);
Zhu Yib481de92007-09-25 17:54:57 -07001960
1961 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler30e553e2008-05-29 16:35:16 +08001962 ret = iwl_grab_nic_access(priv);
1963 if (ret) {
Zhu Yib481de92007-09-25 17:54:57 -07001964 spin_unlock_irqrestore(&priv->lock, flags);
Tomas Winkler30e553e2008-05-29 16:35:16 +08001965 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001966 }
1967
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001968 /* Stop this Tx queue before configuring it */
Zhu Yib481de92007-09-25 17:54:57 -07001969 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1970
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001971 /* Map receiver-address / traffic-ID to this queue */
Zhu Yib481de92007-09-25 17:54:57 -07001972 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
1973
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001974 /* Set this queue as a chain-building queue */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001975 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07001976
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001977 /* Place first TFD at index corresponding to start sequence number.
1978 * Assumes that ssn_idx is valid (!= 0xFFF) */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08001979 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1980 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
Zhu Yib481de92007-09-25 17:54:57 -07001981 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1982
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001983 /* Set up Tx window size and frame limit for this queue */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001984 iwl_write_targ_mem(priv,
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001985 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
1986 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1987 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001988
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001989 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001990 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1991 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
1992 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001993
Tomas Winkler12a81f62008-04-03 16:05:20 -07001994 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07001995
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001996 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
Zhu Yib481de92007-09-25 17:54:57 -07001997 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
1998
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001999 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002000 spin_unlock_irqrestore(&priv->lock, flags);
2001
2002 return 0;
2003}
2004
Tomas Winkler133636d2008-05-05 10:22:34 +08002005
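/* 4965 keeps its own RXON layout, presumably shorter than the generic
 * iwl_rxon_cmd handed down by the core, so REPLY_RXON is trimmed to the
 * 4965 size; every other command keeps the length the core requested. */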
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08002006static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
2007{
2008 switch (cmd_id) {
2009 case REPLY_RXON:
2010 return (u16) sizeof(struct iwl4965_rxon_cmd);
2011 default:
2012 return len;
2013 }
2014}
2015
Tomas Winkler133636d2008-05-05 10:22:34 +08002016static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
2017{
2018 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
2019 addsta->mode = cmd->mode;
2020 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2021 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
2022 addsta->station_flags = cmd->station_flags;
2023 addsta->station_flags_msk = cmd->station_flags_msk;
2024 addsta->tid_disable_tx = cmd->tid_disable_tx;
2025 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2026 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2027 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2028 addsta->reserved1 = __constant_cpu_to_le16(0);
2029 addsta->reserved2 = __constant_cpu_to_le32(0);
2030
2031 return (u16)sizeof(struct iwl4965_addsta_cmd);
2032}
Tomas Winklerf20217d2008-05-29 16:35:10 +08002033
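/* The scheduler sequence number appears to sit in the dword that follows
 * the frame_count per-frame status entries of the Tx response; only the
 * bits covered by MAX_SN are meaningful. */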
Tomas Winklerf20217d2008-05-29 16:35:10 +08002034static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
2035{
Tomas Winkler25a65722008-06-12 09:47:07 +08002036 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002037}
2038
2039/**
2040 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
2041 */
2042static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2043 struct iwl_ht_agg *agg,
Tomas Winkler25a65722008-06-12 09:47:07 +08002044 struct iwl4965_tx_resp *tx_resp,
2045 int txq_id, u16 start_idx)
Tomas Winklerf20217d2008-05-29 16:35:10 +08002046{
2047 u16 status;
Tomas Winkler25a65722008-06-12 09:47:07 +08002048 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002049 struct ieee80211_tx_info *info = NULL;
2050 struct ieee80211_hdr *hdr = NULL;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002051 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
Tomas Winkler25a65722008-06-12 09:47:07 +08002052 int i, sh, idx;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002053 u16 seq;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002054 if (agg->wait_for_ba)
2055 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
2056
2057 agg->frame_count = tx_resp->frame_count;
2058 agg->start_idx = start_idx;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002059 agg->rate_n_flags = rate_n_flags;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002060 agg->bitmap = 0;
2061
2062 /* # frames attempted by Tx command */
2063 if (agg->frame_count == 1) {
2064 /* Only one frame was attempted; no block-ack will arrive */
2065 status = le16_to_cpu(frame_status[0].status);
Tomas Winkler25a65722008-06-12 09:47:07 +08002066 idx = start_idx;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002067
2068 /* FIXME: code repetition */
2069 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
2070 agg->frame_count, agg->start_idx, idx);
2071
2072 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
2073 info->status.retry_count = tx_resp->failure_frame;
2074 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
2075 		info->flags |= iwl_is_tx_success(status) ?
2076 IEEE80211_TX_STAT_ACK : 0;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002077 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002078 /* FIXME: code repetition end */
2079
2080 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
2081 status & 0xff, tx_resp->failure_frame);
Tomas Winklere7d326a2008-06-12 09:47:11 +08002082 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002083
2084 agg->wait_for_ba = 0;
2085 } else {
2086 /* Two or more frames were attempted; expect block-ack */
2087 u64 bitmap = 0;
2088 int start = agg->start_idx;
2089
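		/* Each attempted frame contributes one bit at its offset (sh)
		 * from the window start; the branches below re-base the window
		 * (shifting the bitmap built so far) when an index precedes the
		 * start or wraps around the 256-entry queue. */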
2090 /* Construct bit-map of pending frames within Tx window */
2091 for (i = 0; i < agg->frame_count; i++) {
2092 u16 sc;
2093 status = le16_to_cpu(frame_status[i].status);
2094 seq = le16_to_cpu(frame_status[i].sequence);
2095 idx = SEQ_TO_INDEX(seq);
2096 txq_id = SEQ_TO_QUEUE(seq);
2097
2098 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
2099 AGG_TX_STATE_ABORT_MSK))
2100 continue;
2101
2102 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
2103 agg->frame_count, txq_id, idx);
2104
2105 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
2106
2107 sc = le16_to_cpu(hdr->seq_ctrl);
2108 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
2109 IWL_ERROR("BUG_ON idx doesn't match seq control"
2110 " idx=%d, seq_idx=%d, seq=%d\n",
2111 idx, SEQ_TO_SN(sc),
2112 hdr->seq_ctrl);
2113 return -1;
2114 }
2115
2116 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
2117 i, idx, SEQ_TO_SN(sc));
2118
2119 sh = idx - start;
2120 if (sh > 64) {
2121 sh = (start - idx) + 0xff;
2122 bitmap = bitmap << sh;
2123 sh = 0;
2124 start = idx;
2125 } else if (sh < -64)
2126 sh = 0xff - (start - idx);
2127 else if (sh < 0) {
2128 sh = start - idx;
2129 start = idx;
2130 bitmap = bitmap << sh;
2131 sh = 0;
2132 }
Emmanuel Grumbach4aa41f12008-07-18 13:53:09 +08002133 bitmap |= 1ULL << sh;
2134 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n",
2135 start, (unsigned long long)bitmap);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002136 }
2137
2138 agg->bitmap = bitmap;
2139 agg->start_idx = start;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002140 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
2141 agg->frame_count, agg->start_idx,
2142 (unsigned long long)agg->bitmap);
2143
2144 if (bitmap)
2145 agg->wait_for_ba = 1;
2146 }
2147 return 0;
2148}
Tomas Winklerf20217d2008-05-29 16:35:10 +08002149
2150/**
2151 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
2152 */
2153static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2154 struct iwl_rx_mem_buffer *rxb)
2155{
2156 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
2157 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2158 int txq_id = SEQ_TO_QUEUE(sequence);
2159 int index = SEQ_TO_INDEX(sequence);
2160 struct iwl_tx_queue *txq = &priv->txq[txq_id];
2161 struct ieee80211_tx_info *info;
2162 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
Tomas Winkler25a65722008-06-12 09:47:07 +08002163 u32 status = le32_to_cpu(tx_resp->u.status);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002164 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002165 __le16 fc;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002166 struct ieee80211_hdr *hdr;
2167 u8 *qc = NULL;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002168
2169 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
2170 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
2171 "is out of range [0-%d] %d %d\n", txq_id,
2172 index, txq->q.n_bd, txq->q.write_ptr,
2173 txq->q.read_ptr);
2174 return;
2175 }
2176
2177 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
2178 memset(&info->status, 0, sizeof(info->status));
2179
Tomas Winklerf20217d2008-05-29 16:35:10 +08002180 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002181 fc = hdr->frame_control;
2182 if (ieee80211_is_data_qos(fc)) {
2183 qc = ieee80211_get_qos_ctl(hdr);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002184 tid = qc[0] & 0xf;
2185 }
2186
2187 sta_id = iwl_get_ra_sta_id(priv, hdr);
2188 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2189 IWL_ERROR("Station not known\n");
2190 return;
2191 }
2192
2193 if (txq->sched_retry) {
2194 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2195 struct iwl_ht_agg *agg = NULL;
2196
2197 if (!qc)
2198 return;
2199
2200 agg = &priv->stations[sta_id].tid[tid].agg;
2201
Tomas Winkler25a65722008-06-12 09:47:07 +08002202 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002203
Ron Rindjunsky32354272008-07-01 10:44:51 +03002204 /* check if BAR is needed */
2205 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
2206 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002207
2208 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2209 int freed, ampdu_q;
2210 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2211 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
2212 "%d index %d\n", scd_ssn , index);
Tomas Winkler17b88922008-05-29 16:35:12 +08002213 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002214 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2215
2216 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
2217 txq_id >= 0 && priv->mac80211_registered &&
2218 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
2219 /* calculate mac80211 ampdu sw queue to wake */
Ron Rindjunsky7f3e4bb2008-06-12 09:46:55 +08002220 ampdu_q = txq_id - IWL49_FIRST_AMPDU_QUEUE +
Tomas Winklerf20217d2008-05-29 16:35:10 +08002221 priv->hw->queues;
2222 if (agg->state == IWL_AGG_OFF)
2223 ieee80211_wake_queue(priv->hw, txq_id);
2224 else
2225 ieee80211_wake_queue(priv->hw, ampdu_q);
2226 }
Tomas Winkler30e553e2008-05-29 16:35:16 +08002227 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002228 }
2229 } else {
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002230 info->status.retry_count = tx_resp->failure_frame;
2231 info->flags |=
2232 iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002233 iwl_hwrate_to_tx_control(priv,
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002234 le32_to_cpu(tx_resp->rate_n_flags),
2235 info);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002236
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002237 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags "
2238 "0x%x retries %d\n", txq_id,
2239 iwl_get_tx_fail_reason(status),
2240 status, le32_to_cpu(tx_resp->rate_n_flags),
2241 tx_resp->failure_frame);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002242
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002243 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
Tomas Winklere7d326a2008-06-12 09:47:11 +08002244
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002245 if (index != -1) {
2246 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2247 if (tid != MAX_TID_COUNT)
Tomas Winklerf20217d2008-05-29 16:35:10 +08002248 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002249 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
Tomas Winklerf20217d2008-05-29 16:35:10 +08002250 (txq_id >= 0) && priv->mac80211_registered)
2251 ieee80211_wake_queue(priv->hw, txq_id);
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002252 if (tid != MAX_TID_COUNT)
Tomas Winkler30e553e2008-05-29 16:35:16 +08002253 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002254 }
Tomas Winklerf20217d2008-05-29 16:35:10 +08002255 }
Tomas Winklerf20217d2008-05-29 16:35:10 +08002256
2257 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
2258 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
2259}
2260
Tomas Winklercaab8f12008-08-04 16:00:42 +08002261static int iwl4965_calc_rssi(struct iwl_priv *priv,
2262 struct iwl_rx_phy_res *rx_resp)
2263{
2264 /* data from PHY/DSP regarding signal strength, etc.,
2265 * contents are always there, not configurable by host. */
2266 struct iwl4965_rx_non_cfg_phy *ncphy =
2267 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
2268 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
2269 >> IWL49_AGC_DB_POS;
2270
2271 u32 valid_antennae =
2272 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
2273 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
2274 u8 max_rssi = 0;
2275 u32 i;
2276
2277 /* Find max rssi among 3 possible receivers.
2278 * These values are measured by the digital signal processor (DSP).
2279 * They should stay fairly constant even as the signal strength varies,
2280 * if the radio's automatic gain control (AGC) is working right.
2281 * AGC value (see below) will provide the "interesting" info. */
2282 for (i = 0; i < 3; i++)
2283 if (valid_antennae & (1 << i))
2284 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2285
2286 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2287 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2288 max_rssi, agc);
2289
2290 /* dBm = max_rssi dB - agc dB - constant.
2291 * Higher AGC (higher radio gain) means lower signal. */
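	/* Hypothetical example: max_rssi of 70 dB with an agc of 20 dB would
	 * yield (70 - 20 - IWL_RSSI_OFFSET) dBm. */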
2292 return max_rssi - agc - IWL_RSSI_OFFSET;
2293}
2294
Tomas Winklerf20217d2008-05-29 16:35:10 +08002295
Zhu Yib481de92007-09-25 17:54:57 -07002296/* Set up 4965-specific Rx frame reply handlers */
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002297static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002298{
2299 /* Legacy Rx frames */
Emmanuel Grumbach1781a072008-06-30 17:23:09 +08002300 priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
Ron Rindjunsky37a44212008-05-29 16:35:18 +08002301 /* Tx response */
Tomas Winklerf20217d2008-05-29 16:35:10 +08002302 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
Zhu Yib481de92007-09-25 17:54:57 -07002303}
2304
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002305static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002306{
2307 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
Zhu Yib481de92007-09-25 17:54:57 -07002308}
2309
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002310static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002311{
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002312 cancel_work_sync(&priv->txpower_work);
Zhu Yib481de92007-09-25 17:54:57 -07002313}
2314
Tomas Winkler3c424c22008-04-15 16:01:42 -07002315
2316static struct iwl_hcmd_ops iwl4965_hcmd = {
Tomas Winkler7e8c5192008-04-15 16:01:43 -07002317 .rxon_assoc = iwl4965_send_rxon_assoc,
Tomas Winkler3c424c22008-04-15 16:01:42 -07002318};
2319
Tomas Winkler857485c2008-03-21 13:53:44 -07002320static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08002321 .get_hcmd_size = iwl4965_get_hcmd_size,
Tomas Winkler133636d2008-05-05 10:22:34 +08002322 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07002323 .chain_noise_reset = iwl4965_chain_noise_reset,
2324 .gain_computation = iwl4965_gain_computation,
Emmanuel Grumbacha326a5d2008-07-11 11:53:31 +08002325 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
Tomas Winklercaab8f12008-08-04 16:00:42 +08002326 .calc_rssi = iwl4965_calc_rssi,
Tomas Winkler857485c2008-03-21 13:53:44 -07002327};
2328
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002329static struct iwl_lib_ops iwl4965_lib = {
Tomas Winkler5425e492008-04-15 16:01:38 -07002330 .set_hw_params = iwl4965_hw_set_hw_params,
Ron Rindjunsky399f4902008-04-23 17:14:56 -07002331 .alloc_shared_mem = iwl4965_alloc_shared_mem,
2332 .free_shared_mem = iwl4965_free_shared_mem,
Ron Rindjunskyd67f5482008-05-05 10:22:49 +08002333 .shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
Tomas Winklere2a722e2008-04-14 21:16:10 -07002334 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
Tomas Winklerda1bc452008-05-29 16:35:00 +08002335 .txq_set_sched = iwl4965_txq_set_sched,
Tomas Winkler30e553e2008-05-29 16:35:16 +08002336 .txq_agg_enable = iwl4965_txq_agg_enable,
2337 .txq_agg_disable = iwl4965_txq_agg_disable,
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002338 .rx_handler_setup = iwl4965_rx_handler_setup,
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002339 .setup_deferred_work = iwl4965_setup_deferred_work,
2340 .cancel_deferred_work = iwl4965_cancel_deferred_work,
Tomas Winkler57aab752008-04-14 21:16:03 -07002341 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2342 .alive_notify = iwl4965_alive_notify,
Emmanuel Grumbachf3ccc082008-05-05 10:22:45 +08002343 .init_alive_start = iwl4965_init_alive_start,
Tomas Winkler57aab752008-04-14 21:16:03 -07002344 .load_ucode = iwl4965_load_bsm,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07002345 .apm_ops = {
Tomas Winkler91238712008-04-23 17:14:53 -07002346 .init = iwl4965_apm_init,
Tomas Winkler7f066102008-05-29 16:34:57 +08002347 .reset = iwl4965_apm_reset,
Tomas Winklerf118a912008-05-29 16:34:58 +08002348 .stop = iwl4965_apm_stop,
Tomas Winkler694cc562008-04-24 11:55:22 -07002349 .config = iwl4965_nic_config,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07002350 .set_pwr_src = iwl4965_set_pwr_src,
2351 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002352 .eeprom_ops = {
Tomas Winkler073d3f52008-04-21 15:41:52 -07002353 .regulatory_bands = {
2354 EEPROM_REGULATORY_BAND_1_CHANNELS,
2355 EEPROM_REGULATORY_BAND_2_CHANNELS,
2356 EEPROM_REGULATORY_BAND_3_CHANNELS,
2357 EEPROM_REGULATORY_BAND_4_CHANNELS,
2358 EEPROM_REGULATORY_BAND_5_CHANNELS,
2359 EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
2360 EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
2361 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002362 .verify_signature = iwlcore_eeprom_verify_signature,
2363 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
2364 .release_semaphore = iwlcore_eeprom_release_semaphore,
Tomas Winkler8614f362008-04-23 17:14:55 -07002365 .check_version = iwl4965_eeprom_check_version,
Tomas Winkler073d3f52008-04-21 15:41:52 -07002366 .query_addr = iwlcore_eeprom_query_addr,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002367 },
Tomas Winkler630fe9b2008-06-12 09:47:08 +08002368 .send_tx_power = iwl4965_send_tx_power,
Mohamed Abbas5da4b552008-04-21 15:41:51 -07002369 .update_chain_flags = iwl4965_update_chain_flags,
Emmanuel Grumbach8f91aec2008-06-30 17:23:07 +08002370 .temperature = iwl4965_temperature_calib,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002371};
2372
2373static struct iwl_ops iwl4965_ops = {
2374 .lib = &iwl4965_lib,
Tomas Winkler3c424c22008-04-15 16:01:42 -07002375 .hcmd = &iwl4965_hcmd,
Tomas Winkler857485c2008-03-21 13:53:44 -07002376 .utils = &iwl4965_hcmd_utils,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002377};
2378
Ron Rindjunskyfed90172008-04-15 16:01:41 -07002379struct iwl_cfg iwl4965_agn_cfg = {
Tomas Winkler82b9a122008-03-04 18:09:30 -08002380 .name = "4965AGN",
Tomas Winkler4bf775c2008-03-04 18:09:31 -08002381 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
Tomas Winkler82b9a122008-03-04 18:09:30 -08002382 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
Tomas Winkler073d3f52008-04-21 15:41:52 -07002383 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002384 .ops = &iwl4965_ops,
Assaf Krauss1ea87392008-03-18 14:57:50 -07002385 .mod_params = &iwl4965_mod_params,
Tomas Winkler82b9a122008-03-04 18:09:30 -08002386};
2387
Tomas Winklerd16dc482008-07-11 11:53:38 +08002388/* Module firmware */
2389MODULE_FIRMWARE("iwlwifi-4965" IWL4965_UCODE_API ".ucode");
2390
Assaf Krauss1ea87392008-03-18 14:57:50 -07002391module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
2392MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2393module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
2394MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
Emmanuel Grumbachfcc76c62008-04-15 16:01:47 -07002395module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
Niels de Vos61a2d072008-07-31 00:07:23 -07002396MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");
Assaf Krauss1ea87392008-03-18 14:57:50 -07002397module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
2398MODULE_PARM_DESC(debug, "debug output mask");
2399module_param_named(
2400 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
2401MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
2402
2403module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
2404MODULE_PARM_DESC(queues_num, "number of hw queues.");
Assaf Krauss1ea87392008-03-18 14:57:50 -07002405/* QoS */
2406module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
2407MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
Ron Rindjunsky49779292008-06-30 17:23:21 +08002408/* 11n */
2409module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444);
2410MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
Assaf Krauss1ea87392008-03-18 14:57:50 -07002411module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
2412MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
Ron Rindjunsky49779292008-06-30 17:23:21 +08002413
Ester Kummer3a1081e2008-05-06 11:05:14 +08002414module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
2415MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");