/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-calib.h"
#include "iwl-sta.h"
#include "iwl-agn-led.h"
#include "iwl-agn.h"
#include "iwl-agn-debugfs.h"

static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);

/* Highest firmware API version supported */
#define IWL4965_UCODE_API_MAX 2

/* Lowest firmware API version supported */
#define IWL4965_UCODE_API_MIN 2

#define IWL4965_FW_PRE "iwlwifi-4965-"
#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)

/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");

	/* verify BSM SRAM contents */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "BSM uCode verification failed at "
				"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				BSM_SRAM_LOWER_BOUND,
				reg - BSM_SRAM_LOWER_BOUND, len,
				val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");

	return 0;
}

/**
 * iwl4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL.  When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers.  Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image.  This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program.  This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO(priv, "Begin load bsm\n");

	priv->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE:  iwl_init_alive_start() will replace these values,
	 *        after the "initialize" uCode has run, to point to
	 *        runtime/protocol instructions and backup data cache.
	 */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));

	ret = iwl4965_verify_bsm(priv);
	if (ret)
		return ret;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
	iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 *   to prepare to load "initialize" uCode */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish */
	for (i = 0; i < 100; i++) {
		done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERR(priv, "BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 *   (e.g. when powering back up after power-save shutdown) */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}

/**
 * iwl4965_set_ucode_ptrs - Set uCode address location
 *
 * Tell initialization uCode where to find runtime uCode.
 *
 * BSM registers initially contain pointers to initialization uCode.
 * We need to replace them to load runtime uCode inst and data,
 * and to save runtime data when powering down.
 */
static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
	dma_addr_t pinst;
	dma_addr_t pdata;
	int ret = 0;

	/* bits 35:4 for 4965 */
	pinst = priv->ucode_code.p_addr >> 4;
	pdata = priv->ucode_data_backup.p_addr >> 4;

	/* Tell bootstrap uCode where to find image to load */
	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
		       priv->ucode_data.len);

	/* Inst byte count must be last to set up, bit 31 signals uCode
	 *   that all new ptr/size info is in place */
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
		       priv->ucode_code.len | BSM_DRAM_INST_LOAD);
	IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");

	return ret;
}

/**
 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
 *
 * Called after REPLY_ALIVE notification received from "initialize" uCode.
 *
 * The 4965 "initialize" ALIVE reply contains calibration data for:
 *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
 *   (3945 does not contain this data).
 *
 * Tell "initialize" uCode to go ahead and load the runtime uCode.
*/
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
		goto restart;
	}

	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded.  */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	priv->temperature = iwl4965_hw_get_temperature(priv);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
	if (iwl4965_set_ucode_ptrs(priv)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	queue_work(priv->workqueue, &priv->restart);
}

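/* Returns true when the RXON channel-mode field selects a 40 MHz channel
 * (pure 40 MHz or mixed 20/40 operation). */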
static bool is_ht40_channel(__le32 rxon_flags)
{
	int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
				   >> RXON_FLG_CHANNEL_MODE_POS;
	return ((chan_mod == CHANNEL_MODE_PURE_40) ||
		(chan_mod == CHANNEL_MODE_MIXED));
}

/*
 * EEPROM handlers
 */
static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
{
	return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
 * must be called under priv->lock and mac access
 */
static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
}

static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u16 radio_cfg;

	spin_lock_irqsave(&priv->lock, flags);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 *  ... once chain noise is calibrated the first time, it's good forever.  */
static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
	    iwl_is_any_associated(priv)) {
		struct iwl_calib_diff_gain_cmd cmd;

		/* clear data for chain noise calibration algorithm */
		data->chain_noise_a = 0;
		data->chain_noise_b = 0;
		data->chain_noise_c = 0;
		data->chain_signal_a = 0;
		data->chain_signal_b = 0;
		data->chain_signal_c = 0;
		data->beacon_count = 0;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				     sizeof(cmd), &cmd))
			IWL_ERR(priv,
				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
	}
}

static void iwl4965_gain_computation(struct iwl_priv *priv,
				     u32 *average_noise,
				     u16 min_average_noise_antenna_i,
				     u32 min_average_noise,
				     u8 default_chain)
{
	int i, ret;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	data->delta_gain_code[min_average_noise_antenna_i] = 0;

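	/* For every other eligible chain, express its excess noise (relative
	 * to the quietest chain) as a delta-gain code: roughly one code step
	 * per 1.5 units of noise difference, clamped to
	 * CHAIN_NOISE_MAX_DELTA_GAIN_CODE, with bit 2 OR'd into the result. */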
	for (i = default_chain; i < NUM_RX_CHAINS; i++) {
		s32 delta_g = 0;

		if (!(data->disconn_array[i]) &&
		    (data->delta_gain_code[i] ==
			     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
			delta_g = average_noise[i] - min_average_noise;
			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
			data->delta_gain_code[i] =
				min(data->delta_gain_code[i],
				    (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

			data->delta_gain_code[i] =
				(data->delta_gain_code[i] | (1 << 2));
		} else {
			data->delta_gain_code[i] = 0;
		}
	}
	IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
			data->delta_gain_code[0],
			data->delta_gain_code[1],
			data->delta_gain_code[2]);

	/* Differential gain gets sent to uCode only once */
	if (!data->radio_write) {
		struct iwl_calib_diff_gain_cmd cmd;
		data->radio_write = 1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = data->delta_gain_code[0];
		cmd.diff_gain_b = data->delta_gain_code[1];
		cmd.diff_gain_c = data->delta_gain_code[2];
		ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_DEBUG_CALIB(priv, "fail sending cmd "
					"REPLY_PHY_CALIBRATION_CMD\n");

		/* TODO: we might want to recalculate
		 * rx_chain in the rxon cmd */

		/* Mark so we run this algo only once! */
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
}

static void iwl4965_bg_txpower_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     txpower_work);

	/* If a scan happened to start before we got here
	 * then just return; the statistics notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		return;

	mutex_lock(&priv->mutex);

	/* Regardless of whether we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	iwl4965_send_tx_power(priv);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	priv->last_temperature = priv->temperature;

	mutex_unlock(&priv->mutex);
}

/*
 * Acquire priv->lock before calling this function !
 */
static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}

/**
 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
 * @scd_retry: (1) Indicates queue will be used in aggregation mode
 *
 * NOTE:  Acquire priv->lock before calling this function !
 */
static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		       IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}

static const s8 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};

static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channels */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				   IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				   (SCD_WIN_SIZE <<
				   IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				   IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				   IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   (SCD_FRAME_LIMIT <<
				   IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				   IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
		       (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0, /* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};

static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
{
	/* want Kelvin */
	priv->hw_params.ct_kill_threshold =
		CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
}

/**
 * iwl4965_hw_set_hw_params
 *
 * Called when initializing driver
 */
static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
{
	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
	    priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
		priv->cfg->num_of_queues =
			priv->cfg->mod_params->num_of_queues;

	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
	priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	priv->hw_params.scd_bc_tbls_size =
			priv->cfg->num_of_queues *
			sizeof(struct iwl4965_scd_bc_tbl);
	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
	priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);

	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;

	priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
	priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
	priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
	priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
	if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
		priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);

	priv->hw_params.sens = &iwl4965_sensitivity;
	priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;

	return 0;
}

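/* Integer division rounded to the nearest integer, with halves rounding away
 * from zero.  The quotient is returned through *res (e.g. 7/2 -> 4,
 * -7/2 -> -4); the function's return value itself is always 1. */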
static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
	s32 sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	*res = 1;
	*res = ((num * 2 + denom) / (denom * 2)) * sign;

	return 1;
}

/**
 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
 *
 * Determines power supply voltage compensation for txpower calculations.
 * Returns number of 1/2-dB steps to subtract from gain table index,
 * to compensate for difference between power supply voltage during
 * factory measurements, vs. current power supply voltage.
 *
 * Voltage indication is higher for lower voltage.
 * Lower voltage requires more gain (lower gain table index).
 */
static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
					    s32 current_voltage)
{
	s32 comp = 0;

	if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
	    (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
		return 0;

	iwl4965_math_div_round(current_voltage - eeprom_voltage,
			       TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);

	if (current_voltage > eeprom_voltage)
		comp *= 2;
	if ((comp < -2) || (comp > 2))
		comp = 0;

	return comp;
}

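/* Map a channel number onto its txpower attenuation calibration group
 * (CALIB_CH_GROUP_*), or -1 if the channel lies outside every group. */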
static s32 iwl4965_get_tx_atten_grp(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;

	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;

	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;

	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;

	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;

	return -1;
}

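/* Find the EEPROM calibration sub-band whose [ch_from, ch_to] range contains
 * the given channel.  Returns EEPROM_TX_POWER_BANDS when no sub-band matches,
 * which callers treat as an error. */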
static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
{
	s32 b = -1;

	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
		if (priv->calib_info->band_info[b].ch_from == 0)
			continue;

		if ((channel >= priv->calib_info->band_info[b].ch_from)
		    && (channel <= priv->calib_info->band_info[b].ch_to))
			break;
	}

	return b;
}

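/* Linear interpolation: the value at x on the line through (x1, y1) and
 * (x2, y2), computed with the rounding division above.  Falls back to y1
 * when the two sample points coincide. */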
static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	s32 val;

	if (x2 == x1)
		return y1;
	else {
		iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
		return val + y2;
	}
}

/**
 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
 *
 * Interpolates factory measurements from the two sample channels within a
 * sub-band, to apply to channel of interest.  Interpolation is proportional to
 * differences in channel frequencies, which is proportional to differences
 * in channel number.
 */
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
				    struct iwl_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct iwl_eeprom_calib_measure *m1;
	const struct iwl_eeprom_calib_measure *m2;
	struct iwl_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	s = iwl4965_get_sub_band(priv, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
		return -1;
	}

	ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
			  channel, s, ch_i1, ch_i2);

	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(priv->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(priv->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->actual_pow,
							   ch_i2,
							   m2->actual_pow);
			omeas->gain_idx =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->gain_idx, ch_i2,
							   m2->gain_idx);
			omeas->temperature =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->temperature,
							   ch_i2,
							   m2->temperature);
			omeas->pa_det =
			    (s8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->pa_det, ch_i2,
							   m2->pa_det);

			IWL_DEBUG_TXPOWER(priv,
				"chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
				m1->actual_pow, m2->actual_pow, omeas->actual_pow);
			IWL_DEBUG_TXPOWER(priv,
				"chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
				m1->gain_idx, m2->gain_idx, omeas->gain_idx);
			IWL_DEBUG_TXPOWER(priv,
				"chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
				m1->pa_det, m2->pa_det, omeas->pa_det);
			IWL_DEBUG_TXPOWER(priv,
				"chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
				m1->temperature, m2->temperature,
				omeas->temperature);
		}
	}

	return 0;
}

/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};

/* Thermal compensation values for txpower for various frequency ranges ...
 *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;
	s32 degrees_per_05db_a_denom;
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch  34-43 */
	{4, 1},			/* group 1 5.2, ch  44-70 */
	{4, 1},			/* group 2 5.2, ch  71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch   all */
};

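/* Lower bound for a computed gain-table index.  On 5.2 GHz (band == 0) the
 * first few rates of each 8-entry group may drop to the extended minimum
 * index; everything else is clamped at MIN_TX_GAIN_INDEX. */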
static s32 get_min_power_index(s32 rate_power_index, u32 band)
{
	if (!band) {
		if ((rate_power_index & 7) <= 4)
			return MIN_TX_GAIN_INDEX_52GHZ_EXT;
	}
	return MIN_TX_GAIN_INDEX;
}

struct gain_entry {
	u8 dsp;
	u8 radio;
};

static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
		{123, 0x3F},	/* highest txpower */
		{117, 0x3F},
		{110, 0x3F},
		{104, 0x3F},
		{98, 0x3F},
		{110, 0x3E},
		{104, 0x3E},
		{98, 0x3E},
		{110, 0x3D},
		{104, 0x3D},
		{98, 0x3D},
		{110, 0x3C},
		{104, 0x3C},
		{98, 0x3C},
		{110, 0x3B},
		{104, 0x3B},
		{98, 0x3B},
		{110, 0x3A},
		{104, 0x3A},
		{98, 0x3A},
		{110, 0x39},
		{104, 0x39},
		{98, 0x39},
		{110, 0x38},
		{104, 0x38},
		{98, 0x38},
		{110, 0x37},
		{104, 0x37},
		{98, 0x37},
		{110, 0x36},
		{104, 0x36},
		{98, 0x36},
		{110, 0x35},
		{104, 0x35},
		{98, 0x35},
		{110, 0x34},
		{104, 0x34},
		{98, 0x34},
		{110, 0x33},
		{104, 0x33},
		{98, 0x33},
		{110, 0x32},
		{104, 0x32},
		{98, 0x32},
		{110, 0x31},
		{104, 0x31},
		{98, 0x31},
		{110, 0x30},
		{104, 0x30},
		{98, 0x30},
		{110, 0x25},
		{104, 0x25},
		{98, 0x25},
		{110, 0x24},
		{104, 0x24},
		{98, 0x24},
		{110, 0x23},
		{104, 0x23},
		{98, 0x23},
		{110, 0x22},
		{104, 0x18},
		{98, 0x18},
		{110, 0x17},
		{104, 0x17},
		{98, 0x17},
		{110, 0x16},
		{104, 0x16},
		{98, 0x16},
		{110, 0x15},
		{104, 0x15},
		{98, 0x15},
		{110, 0x14},
		{104, 0x14},
		{98, 0x14},
		{110, 0x13},
		{104, 0x13},
		{98, 0x13},
		{110, 0x12},
		{104, 0x08},
		{98, 0x08},
		{110, 0x07},
		{104, 0x07},
		{98, 0x07},
		{110, 0x06},
		{104, 0x06},
		{98, 0x06},
		{110, 0x05},
		{104, 0x05},
		{98, 0x05},
		{110, 0x04},
		{104, 0x04},
		{98, 0x04},
		{110, 0x03},
		{104, 0x03},
		{98, 0x03},
		{110, 0x02},
		{104, 0x02},
		{98, 0x02},
		{110, 0x01},
		{104, 0x01},
		{98, 0x01},
		{110, 0x00},
		{104, 0x00},
		{98, 0x00},
		{93, 0x00},
		{88, 0x00},
		{83, 0x00},
		{78, 0x00},
	},
	/* 2.4GHz power gain index table */
	{
		{110, 0x3f},	/* highest txpower */
		{104, 0x3f},
		{98, 0x3f},
		{110, 0x3e},
		{104, 0x3e},
		{98, 0x3e},
		{110, 0x3d},
		{104, 0x3d},
		{98, 0x3d},
		{110, 0x3c},
		{104, 0x3c},
		{98, 0x3c},
		{110, 0x3b},
		{104, 0x3b},
		{98, 0x3b},
		{110, 0x3a},
		{104, 0x3a},
		{98, 0x3a},
		{110, 0x39},
		{104, 0x39},
		{98, 0x39},
		{110, 0x38},
		{104, 0x38},
		{98, 0x38},
		{110, 0x37},
		{104, 0x37},
		{98, 0x37},
		{110, 0x36},
		{104, 0x36},
		{98, 0x36},
		{110, 0x35},
		{104, 0x35},
		{98, 0x35},
		{110, 0x34},
		{104, 0x34},
		{98, 0x34},
		{110, 0x33},
		{104, 0x33},
		{98, 0x33},
		{110, 0x32},
		{104, 0x32},
		{98, 0x32},
		{110, 0x31},
		{104, 0x31},
		{98, 0x31},
		{110, 0x30},
		{104, 0x30},
		{98, 0x30},
		{110, 0x6},
		{104, 0x6},
		{98, 0x6},
		{110, 0x5},
		{104, 0x5},
		{98, 0x5},
		{110, 0x4},
		{104, 0x4},
		{98, 0x4},
		{110, 0x3},
		{104, 0x3},
		{98, 0x3},
		{110, 0x2},
		{104, 0x2},
		{98, 0x2},
		{110, 0x1},
		{104, 0x1},
		{98, 0x1},
		{110, 0x0},
		{104, 0x0},
		{98, 0x0},
		{97, 0},
		{96, 0},
		{95, 0},
		{94, 0},
		{93, 0},
		{92, 0},
		{91, 0},
		{90, 0},
		{89, 0},
		{88, 0},
		{87, 0},
		{86, 0},
		{85, 0},
		{84, 0},
		{83, 0},
		{82, 0},
		{81, 0},
		{80, 0},
		{79, 0},
		{78, 0},
		{77, 0},
		{76, 0},
		{75, 0},
		{74, 0},
		{73, 0},
		{72, 0},
		{71, 0},
		{70, 0},
		{69, 0},
		{68, 0},
		{67, 0},
		{66, 0},
		{65, 0},
		{64, 0},
		{63, 0},
		{62, 0},
		{61, 0},
		{60, 0},
		{59, 0},
	}
};

static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
				    u8 is_ht40, u8 ctrl_chan_high,
				    struct iwl4965_tx_power_db *tx_power_tbl)
{
	u8 saturation_power;
	s32 target_power;
	s32 user_target_power;
	s32 power_limit;
	s32 current_temp;
	s32 reg_limit;
	s32 current_regulatory;
	s32 txatten_grp = CALIB_CH_GROUP_MAX;
	int i;
	int c;
	const struct iwl_channel_info *ch_info = NULL;
	struct iwl_eeprom_calib_ch_info ch_eeprom_info;
	const struct iwl_eeprom_calib_measure *measurement;
	s16 voltage;
	s32 init_voltage;
	s32 voltage_compensation;
	s32 degrees_per_05db_num;
	s32 degrees_per_05db_denom;
	s32 factory_temp;
	s32 temperature_comp[2];
	s32 factory_gain_index[2];
	s32 factory_actual_pwr[2];
	s32 power_index;

	/* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
	 *   are used for indexing into txpower table) */
	user_target_power = 2 * priv->tx_power_user_lmt;

	/* Get current (RXON) channel, band, width */
	IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
			  is_ht40);

	ch_info = iwl_get_channel_info(priv, priv->band, channel);

	if (!is_channel_valid(ch_info))
		return -EINVAL;

	/* get txatten group, used to select 1) thermal txpower adjustment
	 *   and 2) mimo txpower balance between Tx chains. */
	txatten_grp = iwl4965_get_tx_atten_grp(channel);
	if (txatten_grp < 0) {
		IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
			channel);
		return -EINVAL;
	}

	IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
			  channel, txatten_grp);

	if (is_ht40) {
		if (ctrl_chan_high)
			channel -= 2;
		else
			channel += 2;
	}

	/* hardware txpower limits ...
	 * saturation (clipping distortion) txpowers are in half-dBm */
	if (band)
		saturation_power = priv->calib_info->saturation_power24;
	else
		saturation_power = priv->calib_info->saturation_power52;

	if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
	    saturation_power > IWL_TX_POWER_SATURATION_MAX) {
		if (band)
			saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
		else
			saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
	}

	/* regulatory txpower limits ... reg_limit values are in half-dBm,
	 * max_power_avg values are in dBm, convert * 2 */
	if (is_ht40)
		reg_limit = ch_info->ht40_max_power_avg * 2;
	else
		reg_limit = ch_info->max_power_avg * 2;

	if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
	    (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
		if (band)
			reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
		else
			reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
	}

	/* Interpolate txpower calibration values for this channel,
	 * based on factory calibration tests on spaced channels. */
	iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);

	/* calculate tx gain adjustment based on power supply voltage */
	voltage = le16_to_cpu(priv->calib_info->voltage);
	init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
	voltage_compensation =
	    iwl4965_get_voltage_compensation(voltage, init_voltage);

	IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
			  init_voltage,
			  voltage, voltage_compensation);

	/* get current temperature (Celsius) */
	current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
	current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
	current_temp = KELVIN_TO_CELSIUS(current_temp);

	/* select thermal txpower adjustment params, based on channel group
	 *   (same frequency group used for mimo txatten adjustment) */
	degrees_per_05db_num =
	    tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
	degrees_per_05db_denom =
	    tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;

	/* get per-chain txpower values from factory measurements */
	for (c = 0; c < 2; c++) {
		measurement = &ch_eeprom_info.measurements[c][1];

		/* txgain adjustment (in half-dB steps) based on difference
		 * between factory and current temperature */
		factory_temp = measurement->temperature;
		iwl4965_math_div_round((current_temp - factory_temp) *
				       degrees_per_05db_denom,
				       degrees_per_05db_num,
				       &temperature_comp[c]);

		factory_gain_index[c] = measurement->gain_idx;
		factory_actual_pwr[c] = measurement->actual_pow;

		IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
		IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
				  "curr tmp %d, comp %d steps\n",
				  factory_temp, current_temp,
				  temperature_comp[c]);

		IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
				  factory_gain_index[c],
				  factory_actual_pwr[c]);
	}

	/* for each of 33 bit-rates (including 1 for CCK) */
	for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
		u8 is_mimo_rate;
		union iwl4965_tx_power_dual_stream tx_power;

		/* for mimo, reduce each chain's txpower by half
		 * (3dB, 6 steps), so total output power is regulatory
		 * compliant. */
		if (i & 0x8) {
			current_regulatory = reg_limit -
			    IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
			is_mimo_rate = 1;
		} else {
			current_regulatory = reg_limit;
			is_mimo_rate = 0;
		}

		/* find txpower limit, either hardware or regulatory */
		power_limit = saturation_power - back_off_table[i];
		if (power_limit > current_regulatory)
			power_limit = current_regulatory;

		/* reduce user's txpower request if necessary
		 * for this rate on this channel */
		target_power = user_target_power;
		if (target_power > power_limit)
			target_power = power_limit;

		IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
				  i, saturation_power - back_off_table[i],
				  current_regulatory, user_target_power,
				  target_power);

		/* for each of 2 Tx chains (radio transmitters) */
		for (c = 0; c < 2; c++) {
			s32 atten_value;

			if (is_mimo_rate)
				atten_value =
				    (s32)le32_to_cpu(priv->card_alive_init.
						  tx_atten[txatten_grp][c]);
			else
				atten_value = 0;

			/* calculate index; higher index means lower txpower */
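			/* All terms are in half-dB steps: the factory gain
			 * index, corrected for the gap between requested and
			 * factory-measured power, for temperature and supply
			 * voltage, and for the per-chain MIMO attenuation. */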
			power_index = (u8) (factory_gain_index[c] -
					    (target_power -
					     factory_actual_pwr[c]) -
					    temperature_comp[c] -
					    voltage_compensation +
					    atten_value);

/*			IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
						power_index); */

			if (power_index < get_min_power_index(i, band))
				power_index = get_min_power_index(i, band);

			/* adjust 5 GHz index to support negative indexes */
			if (!band)
				power_index += 9;

			/* CCK, rate 32, reduce txpower for CCK */
			if (i == POWER_TABLE_CCK_ENTRY)
				power_index +=
				    IWL_TX_POWER_CCK_COMPENSATION_C_STEP;

			/* stay within the table! */
			if (power_index > 107) {
				IWL_WARN(priv, "txpower index %d > 107\n",
					 power_index);
				power_index = 107;
			}
			if (power_index < 0) {
				IWL_WARN(priv, "txpower index %d < 0\n",
					 power_index);
				power_index = 0;
			}

			/* fill txpower command for this rate/chain */
			tx_power.s.radio_tx_gain[c] =
				gain_table[band][power_index].radio;
			tx_power.s.dsp_predis_atten[c] =
				gain_table[band][power_index].dsp;

			IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
					  "gain 0x%02x dsp %d\n",
					  c, atten_value, power_index,
					  tx_power.s.radio_tx_gain[c],
					  tx_power.s.dsp_predis_atten[c]);
		} /* for each chain */

		tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);

	} /* for each rate */

	return 0;
}

1365/**
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001366 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07001367 *
 1368 * Uses the active RXON for channel, band, and characteristics (ht40, control channel high)
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001369 * The power limit is taken from priv->tx_power_user_lmt.
Zhu Yib481de92007-09-25 17:54:57 -07001370 */
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001371static int iwl4965_send_tx_power(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001372{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001373 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07001374 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001375 u8 band = 0;
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001376 bool is_ht40 = false;
Zhu Yib481de92007-09-25 17:54:57 -07001377 u8 ctrl_chan_high = 0;
Johannes Berg246ed352010-08-23 10:46:32 +02001378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
Zhu Yib481de92007-09-25 17:54:57 -07001379
1380 if (test_bit(STATUS_SCANNING, &priv->status)) {
1381 /* If this gets hit a lot, switch it to a BUG() and catch
1382 * the stack trace to find out who is calling this during
1383 * a scan. */
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001384 IWL_WARN(priv, "TX Power requested while scanning!\n");
Zhu Yib481de92007-09-25 17:54:57 -07001385 return -EAGAIN;
1386 }
1387
Johannes Berg8318d782008-01-24 19:38:38 +01001388 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001389
Johannes Berg246ed352010-08-23 10:46:32 +02001390 is_ht40 = is_ht40_channel(ctx->active.flags);
Zhu Yib481de92007-09-25 17:54:57 -07001391
Johannes Berg246ed352010-08-23 10:46:32 +02001392 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
Zhu Yib481de92007-09-25 17:54:57 -07001393 ctrl_chan_high = 1;
1394
1395 cmd.band = band;
Johannes Berg246ed352010-08-23 10:46:32 +02001396 cmd.channel = ctx->active.channel;
Zhu Yib481de92007-09-25 17:54:57 -07001397
Tomas Winkler857485c2008-03-21 13:53:44 -07001398 ret = iwl4965_fill_txpower_tbl(priv, band,
Johannes Berg246ed352010-08-23 10:46:32 +02001399 le16_to_cpu(ctx->active.channel),
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001400 is_ht40, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07001401 if (ret)
1402 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07001403
Tomas Winkler857485c2008-03-21 13:53:44 -07001404 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1405
1406out:
1407 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001408}
1409
Johannes Berg246ed352010-08-23 10:46:32 +02001410static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1411 struct iwl_rxon_context *ctx)
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001412{
1413 int ret = 0;
1414 struct iwl4965_rxon_assoc_cmd rxon_assoc;
Johannes Berg246ed352010-08-23 10:46:32 +02001415 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
1416 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001417
1418 if ((rxon1->flags == rxon2->flags) &&
1419 (rxon1->filter_flags == rxon2->filter_flags) &&
1420 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1421 (rxon1->ofdm_ht_single_stream_basic_rates ==
1422 rxon2->ofdm_ht_single_stream_basic_rates) &&
1423 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1424 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1425 (rxon1->rx_chain == rxon2->rx_chain) &&
1426 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
Tomas Winklere1623442009-01-27 14:27:56 -08001427 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001428 return 0;
1429 }
1430
Johannes Berg246ed352010-08-23 10:46:32 +02001431 rxon_assoc.flags = ctx->staging.flags;
1432 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1433 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1434 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001435 rxon_assoc.reserved = 0;
1436 rxon_assoc.ofdm_ht_single_stream_basic_rates =
Johannes Berg246ed352010-08-23 10:46:32 +02001437 ctx->staging.ofdm_ht_single_stream_basic_rates;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001438 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
Johannes Berg246ed352010-08-23 10:46:32 +02001439 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1440 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001441
1442 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1443 sizeof(rxon_assoc), &rxon_assoc, NULL);
 1446
 1447	return ret;
1448}
1449
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001450static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1451 struct ieee80211_channel_switch *ch_switch)
Zhu Yib481de92007-09-25 17:54:57 -07001452{
Johannes Berg246ed352010-08-23 10:46:32 +02001453 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
Zhu Yib481de92007-09-25 17:54:57 -07001454 int rc;
1455 u8 band = 0;
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001456 bool is_ht40 = false;
Zhu Yib481de92007-09-25 17:54:57 -07001457 u8 ctrl_chan_high = 0;
Wey-Yi Guy4a56e962009-10-23 13:42:29 -07001458 struct iwl4965_channel_switch_cmd cmd;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001459 const struct iwl_channel_info *ch_info;
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001460 u32 switch_time_in_usec, ucode_switch_time;
1461 u16 ch;
1462 u32 tsf_low;
1463 u8 switch_count;
Johannes Berg246ed352010-08-23 10:46:32 +02001464 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
Johannes Berg8bd413e2010-08-23 10:46:40 +02001465 struct ieee80211_vif *vif = ctx->vif;
Johannes Berg8318d782008-01-24 19:38:38 +01001466 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001467
Johannes Berg246ed352010-08-23 10:46:32 +02001468 is_ht40 = is_ht40_channel(ctx->staging.flags);
Zhu Yib481de92007-09-25 17:54:57 -07001469
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001470 if (is_ht40 &&
Johannes Berg246ed352010-08-23 10:46:32 +02001471 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
Zhu Yib481de92007-09-25 17:54:57 -07001472 ctrl_chan_high = 1;
1473
1474 cmd.band = band;
1475 cmd.expect_beacon = 0;
Shanyu Zhao81e95432010-07-28 13:40:27 -07001476 ch = ch_switch->channel->hw_value;
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001477 cmd.channel = cpu_to_le16(ch);
Johannes Berg246ed352010-08-23 10:46:32 +02001478 cmd.rxon_flags = ctx->staging.flags;
1479 cmd.rxon_filter_flags = ctx->staging.filter_flags;
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001480 switch_count = ch_switch->count;
1481 tsf_low = ch_switch->timestamp & 0x0ffffffff;
1482 /*
1483 * calculate the ucode channel switch time
 1484	 * adding TSF as one of the factors in deciding when to switch
1485 */
1486 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
1487 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
1488 beacon_interval)) {
1489 switch_count -= (priv->ucode_beacon_time -
1490 tsf_low) / beacon_interval;
1491 } else
1492 switch_count = 0;
1493 }
1494 if (switch_count <= 1)
1495 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1496 else {
1497 switch_time_in_usec =
1498 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1499 ucode_switch_time = iwl_usecs_to_beacons(priv,
1500 switch_time_in_usec,
1501 beacon_interval);
1502 cmd.switch_time = iwl_add_beacon_time(priv,
1503 priv->ucode_beacon_time,
1504 ucode_switch_time,
1505 beacon_interval);
1506 }
1507 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
1508 cmd.switch_time);
1509 ch_info = iwl_get_channel_info(priv, priv->band, ch);
Zhu Yib481de92007-09-25 17:54:57 -07001510 if (ch_info)
1511 cmd.expect_beacon = is_channel_radar(ch_info);
Wey-Yi Guy4a56e962009-10-23 13:42:29 -07001512 else {
1513 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
Johannes Berg246ed352010-08-23 10:46:32 +02001514 ctx->active.channel, ch);
Wey-Yi Guy4a56e962009-10-23 13:42:29 -07001515 return -EFAULT;
1516 }
Zhu Yib481de92007-09-25 17:54:57 -07001517
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001518 rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
Zhu Yib481de92007-09-25 17:54:57 -07001519 ctrl_chan_high, &cmd.tx_power);
1520 if (rc) {
Tomas Winklere1623442009-01-27 14:27:56 -08001521 IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
Zhu Yib481de92007-09-25 17:54:57 -07001522 return rc;
1523 }
1524
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001525 priv->switch_rxon.channel = cmd.channel;
Wey-Yi Guy0924e512009-11-06 14:52:54 -08001526 priv->switch_rxon.switch_in_progress = true;
1527
1528 return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07001529}
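/*
 * Rough sketch of the switch-time calculation above, with hypothetical
 * numbers rather than real hardware values: if the AP announces the
 * switch with count = 5 and two beacon intervals have already elapsed
 * between the frame's TSF and priv->ucode_beacon_time, the code reduces
 * the count to 3; since that is still greater than 1, the remaining time
 * is 3 * beacon interval * TIME_UNIT microseconds, which
 * iwl_usecs_to_beacons() and iwl_add_beacon_time() convert back into the
 * uCode's beacon-time format before it is sent in REPLY_CHANNEL_SWITCH.
 */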
1530
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001531/**
Tomas Winklere2a722e2008-04-14 21:16:10 -07001532 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001533 */
Tomas Winklere2a722e2008-04-14 21:16:10 -07001534static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
Ron Rindjunsky16466902008-05-05 10:22:50 +08001535 struct iwl_tx_queue *txq,
Tomas Winklere2a722e2008-04-14 21:16:10 -07001536 u16 byte_cnt)
Zhu Yib481de92007-09-25 17:54:57 -07001537{
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001538 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
Tomas Winkler127901a2008-10-23 23:48:55 -07001539 int txq_id = txq->q.id;
1540 int write_ptr = txq->q.write_ptr;
1541 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1542 __le16 bc_ent;
Zhu Yib481de92007-09-25 17:54:57 -07001543
Tomas Winkler127901a2008-10-23 23:48:55 -07001544 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
Zhu Yib481de92007-09-25 17:54:57 -07001545
Tomas Winkler127901a2008-10-23 23:48:55 -07001546 bc_ent = cpu_to_le16(len & 0xFFF);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001547 /* Set up byte count within first 256 entries */
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001548 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
Zhu Yib481de92007-09-25 17:54:57 -07001549
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001550 /* If within first 64 entries, duplicate at end */
Tomas Winkler127901a2008-10-23 23:48:55 -07001551 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001552 scd_bc_tbl[txq_id].
Tomas Winkler127901a2008-10-23 23:48:55 -07001553 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
Zhu Yib481de92007-09-25 17:54:57 -07001554}
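/*
 * Example with invented indices: for write_ptr = 10 and byte_cnt = 52,
 * len = 52 + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE is stored at
 * tfd_offset[10] and, because 10 < TFD_QUEUE_SIZE_BC_DUP, mirrored at
 * tfd_offset[TFD_QUEUE_SIZE_MAX + 10], presumably so the scheduler can
 * read past the nominal end of the circular buffer when it wraps without
 * needing an extra bounds check.
 */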
1555
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001556/**
Zhu Yib481de92007-09-25 17:54:57 -07001557 * sign_extend - Sign extend a value using specified bit as sign-bit
1558 *
1559 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
1560 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
1561 *
1562 * @param oper value to sign extend
1563 * @param index 0 based bit index (0<=index<32) to sign bit
1564 */
1565static s32 sign_extend(u32 oper, int index)
1566{
1567 u8 shift = 31 - index;
1568
1569 return (s32)(oper << shift) >> shift;
1570}
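/*
 * Explanatory note on the shift trick above: for sign_extend(9, 3),
 * shift = 28, so 9 << 28 moves bit 3 into the sign position (0x90000000)
 * and the signed right shift by 28 smears that bit back down, yielding
 * 0xfffffff9 == -7. Right-shifting a negative s32 is implementation-
 * defined in ISO C, but behaves as an arithmetic shift on the compilers
 * and architectures the kernel supports.
 */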
1571
1572/**
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001573 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
 1574 * Uses the temperature reading supplied by the uCode in its statistics notification
1575 *
1576 * A return of <0 indicates bogus data in the statistics
1577 */
Reinette Chatre3d816c72009-08-07 15:41:37 -07001578static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001579{
1580 s32 temperature;
1581 s32 vt;
1582 s32 R1, R2, R3;
1583 u32 R4;
1584
1585 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
Wey-Yi Guyf3aebee2010-06-14 17:09:54 -07001586 (priv->_agn.statistics.flag &
1587 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001588 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
Zhu Yib481de92007-09-25 17:54:57 -07001589 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1590 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1591 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1592 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1593 } else {
Tomas Winklere1623442009-01-27 14:27:56 -08001594 IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
Zhu Yib481de92007-09-25 17:54:57 -07001595 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1596 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1597 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1598 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1599 }
1600
1601 /*
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001602 * Temperature is only 23 bits, so sign extend out to 32.
Zhu Yib481de92007-09-25 17:54:57 -07001603 *
1604 * NOTE If we haven't received a statistics notification yet
1605 * with an updated temperature, use R4 provided to us in the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001606 * "initialize" ALIVE response.
1607 */
Zhu Yib481de92007-09-25 17:54:57 -07001608 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1609 vt = sign_extend(R4, 23);
1610 else
Wey-Yi Guy325322e2010-07-14 08:07:27 -07001611 vt = sign_extend(le32_to_cpu(priv->_agn.statistics.
1612 general.common.temperature), 23);
Zhu Yib481de92007-09-25 17:54:57 -07001613
 1614	IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d vt: %d\n", R1, R2, R3, vt);
Zhu Yib481de92007-09-25 17:54:57 -07001615
1616 if (R3 == R1) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08001617 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
Zhu Yib481de92007-09-25 17:54:57 -07001618 return -1;
1619 }
1620
1621 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1622 * Add offset to center the adjustment around 0 degrees Centigrade. */
1623 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1624 temperature /= (R3 - R1);
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001625 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
Zhu Yib481de92007-09-25 17:54:57 -07001626
Tomas Winklere1623442009-01-27 14:27:56 -08001627 IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001628 temperature, KELVIN_TO_CELSIUS(temperature));
Zhu Yib481de92007-09-25 17:54:57 -07001629
1630 return temperature;
1631}
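/*
 * Worked example of the conversion above with invented calibration
 * values (they do not correspond to any real NIC): R1 = 100, R2 = 200,
 * R3 = 300 and vt = 250 give
 *
 *	temperature = TEMPERATURE_CALIB_A_VAL * (250 - 200) / (300 - 100)
 *
 * which is then scaled by 97/100 and offset by
 * TEMPERATURE_CALIB_KELVIN_OFFSET to produce the Kelvin value shown in
 * the debug output.
 */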
1632
 1633/* Adjust txpower only if the temperature change meets or exceeds the threshold. */
1634#define IWL_TEMPERATURE_THRESHOLD 3
1635
1636/**
1637 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1638 *
 1639 * If the temperature has changed sufficiently, then a recalibration
1640 * is needed.
1641 *
1642 * Assumes caller will replace priv->last_temperature once calibration
1643 * executed.
1644 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001645static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001646{
1647 int temp_diff;
1648
1649 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
Tomas Winklere1623442009-01-27 14:27:56 -08001650 IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
Zhu Yib481de92007-09-25 17:54:57 -07001651 return 0;
1652 }
1653
1654 temp_diff = priv->temperature - priv->last_temperature;
1655
1656 /* get absolute value */
1657 if (temp_diff < 0) {
Frans Pop91dd6c22010-03-24 14:19:58 -07001658 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
Zhu Yib481de92007-09-25 17:54:57 -07001659 temp_diff = -temp_diff;
1660 } else if (temp_diff == 0)
Frans Pop91dd6c22010-03-24 14:19:58 -07001661 IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
Zhu Yib481de92007-09-25 17:54:57 -07001662 else
Frans Pop91dd6c22010-03-24 14:19:58 -07001663 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
Zhu Yib481de92007-09-25 17:54:57 -07001664
1665 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
Frans Pop91dd6c22010-03-24 14:19:58 -07001666 IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
Zhu Yib481de92007-09-25 17:54:57 -07001667 return 0;
1668 }
1669
Frans Pop91dd6c22010-03-24 14:19:58 -07001670 IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
Zhu Yib481de92007-09-25 17:54:57 -07001671
1672 return 1;
1673}
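/*
 * Example with hypothetical readings: if priv->last_temperature is 290
 * and priv->temperature is 294, the absolute delta of 4 meets the
 * IWL_TEMPERATURE_THRESHOLD of 3, so this returns 1 and the caller
 * queues txpower_work; a delta of 2 would return 0 and skip the
 * recalibration.
 */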
1674
Zhu Yi52256402008-06-30 17:23:31 +08001675static void iwl4965_temperature_calib(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001676{
Zhu Yib481de92007-09-25 17:54:57 -07001677 s32 temp;
Zhu Yib481de92007-09-25 17:54:57 -07001678
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001679 temp = iwl4965_hw_get_temperature(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001680 if (temp < 0)
1681 return;
1682
1683 if (priv->temperature != temp) {
1684 if (priv->temperature)
Tomas Winklere1623442009-01-27 14:27:56 -08001685 IWL_DEBUG_TEMP(priv, "Temperature changed "
Zhu Yib481de92007-09-25 17:54:57 -07001686 "from %dC to %dC\n",
1687 KELVIN_TO_CELSIUS(priv->temperature),
1688 KELVIN_TO_CELSIUS(temp));
1689 else
Tomas Winklere1623442009-01-27 14:27:56 -08001690 IWL_DEBUG_TEMP(priv, "Temperature "
Zhu Yib481de92007-09-25 17:54:57 -07001691 "initialized to %dC\n",
1692 KELVIN_TO_CELSIUS(temp));
1693 }
1694
1695 priv->temperature = temp;
Wey-Yi Guy39b73fb2009-07-24 11:13:02 -07001696 iwl_tt_handler(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001697 set_bit(STATUS_TEMPERATURE, &priv->status);
1698
Emmanuel Grumbach203566f2008-06-12 09:46:54 +08001699 if (!priv->disable_tx_power_cal &&
1700 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1701 iwl4965_is_temp_calib_needed(priv))
Zhu Yib481de92007-09-25 17:54:57 -07001702 queue_work(priv->workqueue, &priv->txpower_work);
1703}
1704
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001705/**
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001706 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
1707 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001708static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001709 u16 txq_id)
1710{
1711 /* Simply stop the queue, but don't change any configuration;
1712 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001713 iwl_write_prph(priv,
Tomas Winkler12a81f62008-04-03 16:05:20 -07001714 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001715 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1716 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001717}
1718
1719/**
 1720 * txq_id must be at least IWL49_FIRST_AMPDU_QUEUE
Ron Rindjunskyb095d032008-03-06 17:36:56 -08001721 * priv->lock must be held by the caller
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001722 */
Tomas Winkler30e553e2008-05-29 16:35:16 +08001723static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1724 u16 ssn_idx, u8 tx_fifo)
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001725{
Tomas Winkler9f17b312008-07-11 11:53:35 +08001726 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
Wey-Yi Guy88804e22009-10-09 13:20:28 -07001727 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
1728 <= txq_id)) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001729 IWL_WARN(priv,
1730 "queue number out of range: %d, must be %d to %d\n",
Tomas Winkler9f17b312008-07-11 11:53:35 +08001731 txq_id, IWL49_FIRST_AMPDU_QUEUE,
Wey-Yi Guy88804e22009-10-09 13:20:28 -07001732 IWL49_FIRST_AMPDU_QUEUE +
1733 priv->cfg->num_of_ampdu_queues - 1);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001734 return -EINVAL;
1735 }
1736
1737 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1738
Tomas Winkler12a81f62008-04-03 16:05:20 -07001739 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001740
1741 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1742 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 1743	/* assumes that ssn_idx is valid (!= 0xFFF) */
1744 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1745
Tomas Winkler12a81f62008-04-03 16:05:20 -07001746 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Ron Rindjunsky36470742008-05-15 13:54:10 +08001747 iwl_txq_ctx_deactivate(priv, txq_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001748 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
1749
1750 return 0;
1751}
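/*
 * Range-check example (constant values assumed purely for illustration):
 * if IWL49_FIRST_AMPDU_QUEUE were 7 and priv->cfg->num_of_ampdu_queues
 * were 10, the test above would accept txq_id 7..16 and warn on anything
 * outside that window, matching the range printed by IWL_WARN().
 */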
1752
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001753/**
1754 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
1755 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001756static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07001757 u16 txq_id)
1758{
1759 u32 tbl_dw_addr;
1760 u32 tbl_dw;
1761 u16 scd_q2ratid;
1762
Tomas Winkler30e553e2008-05-29 16:35:16 +08001763 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
Zhu Yib481de92007-09-25 17:54:57 -07001764
1765 tbl_dw_addr = priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001766 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
Zhu Yib481de92007-09-25 17:54:57 -07001767
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001768 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07001769
1770 if (txq_id & 0x1)
1771 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1772 else
1773 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1774
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001775 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07001776
1777 return 0;
1778}
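/*
 * Illustration with a made-up value: each dword of the SCD translate
 * table holds the RA/TID mapping for two queues, 16 bits apiece. With
 * scd_q2ratid = 0x00ab, an even txq_id rewrites only the low half-word
 * of the dword (0x....00ab) while an odd txq_id rewrites only the high
 * half-word (0x00ab....), leaving the neighbouring queue's entry intact.
 */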
1779
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001780
Zhu Yib481de92007-09-25 17:54:57 -07001781/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001782 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
1783 *
 1784 * NOTE: txq_id must be at least IWL49_FIRST_AMPDU_QUEUE,
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001785 * i.e. it must be one of the higher queues used for aggregation
Zhu Yib481de92007-09-25 17:54:57 -07001786 */
Tomas Winkler30e553e2008-05-29 16:35:16 +08001787static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1788 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
Zhu Yib481de92007-09-25 17:54:57 -07001789{
1790 unsigned long flags;
Zhu Yib481de92007-09-25 17:54:57 -07001791 u16 ra_tid;
Johannes Berg4620fef2010-06-16 03:30:27 -07001792 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001793
Tomas Winkler9f17b312008-07-11 11:53:35 +08001794 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
Wey-Yi Guy88804e22009-10-09 13:20:28 -07001795 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
1796 <= txq_id)) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001797 IWL_WARN(priv,
1798 "queue number out of range: %d, must be %d to %d\n",
Tomas Winkler9f17b312008-07-11 11:53:35 +08001799 txq_id, IWL49_FIRST_AMPDU_QUEUE,
Wey-Yi Guy88804e22009-10-09 13:20:28 -07001800 IWL49_FIRST_AMPDU_QUEUE +
1801 priv->cfg->num_of_ampdu_queues - 1);
Tomas Winkler9f17b312008-07-11 11:53:35 +08001802 return -EINVAL;
1803 }
Zhu Yib481de92007-09-25 17:54:57 -07001804
1805 ra_tid = BUILD_RAxTID(sta_id, tid);
1806
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001807 /* Modify device's station table to Tx this TID */
Johannes Berg4620fef2010-06-16 03:30:27 -07001808 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
1809 if (ret)
1810 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001811
1812 spin_lock_irqsave(&priv->lock, flags);
Zhu Yib481de92007-09-25 17:54:57 -07001813
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001814 /* Stop this Tx queue before configuring it */
Zhu Yib481de92007-09-25 17:54:57 -07001815 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1816
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001817 /* Map receiver-address / traffic-ID to this queue */
Zhu Yib481de92007-09-25 17:54:57 -07001818 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
1819
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001820 /* Set this queue as a chain-building queue */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001821 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07001822
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001823 /* Place first TFD at index corresponding to start sequence number.
1824 * Assumes that ssn_idx is valid (!= 0xFFF) */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08001825 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1826 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
Zhu Yib481de92007-09-25 17:54:57 -07001827 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1828
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001829 /* Set up Tx window size and frame limit for this queue */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001830 iwl_write_targ_mem(priv,
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001831 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
1832 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1833 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001834
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001835 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001836 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1837 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
1838 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001839
Tomas Winkler12a81f62008-04-03 16:05:20 -07001840 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07001841
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001842 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
Zhu Yib481de92007-09-25 17:54:57 -07001843 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
1844
Zhu Yib481de92007-09-25 17:54:57 -07001845 spin_unlock_irqrestore(&priv->lock, flags);
1846
1847 return 0;
1848}
1849
Tomas Winkler133636d2008-05-05 10:22:34 +08001850
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08001851static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1852{
1853 switch (cmd_id) {
1854 case REPLY_RXON:
1855 return (u16) sizeof(struct iwl4965_rxon_cmd);
1856 default:
1857 return len;
1858 }
1859}
1860
Tomas Winkler133636d2008-05-05 10:22:34 +08001861static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
1862{
1863 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1864 addsta->mode = cmd->mode;
1865 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1866 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
1867 addsta->station_flags = cmd->station_flags;
1868 addsta->station_flags_msk = cmd->station_flags_msk;
1869 addsta->tid_disable_tx = cmd->tid_disable_tx;
1870 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1871 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1872 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
Johannes Berg9bb487b2009-11-13 11:56:36 -08001873 addsta->sleep_tx_count = cmd->sleep_tx_count;
Harvey Harrisonc1b4aa32009-01-29 13:26:44 -08001874 addsta->reserved1 = cpu_to_le16(0);
Wey-Yi Guy62624082009-11-20 12:05:01 -08001875 addsta->reserved2 = cpu_to_le16(0);
Tomas Winkler133636d2008-05-05 10:22:34 +08001876
1877 return (u16)sizeof(struct iwl4965_addsta_cmd);
1878}
Tomas Winklerf20217d2008-05-29 16:35:10 +08001879
Tomas Winklerf20217d2008-05-29 16:35:10 +08001880static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
1881{
Tomas Winkler25a65722008-06-12 09:47:07 +08001882 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
Tomas Winklerf20217d2008-05-29 16:35:10 +08001883}
1884
1885/**
Tomas Winklera96a27f2008-10-23 23:48:56 -07001886 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
Tomas Winklerf20217d2008-05-29 16:35:10 +08001887 */
1888static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1889 struct iwl_ht_agg *agg,
Tomas Winkler25a65722008-06-12 09:47:07 +08001890 struct iwl4965_tx_resp *tx_resp,
1891 int txq_id, u16 start_idx)
Tomas Winklerf20217d2008-05-29 16:35:10 +08001892{
1893 u16 status;
Tomas Winkler25a65722008-06-12 09:47:07 +08001894 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
Tomas Winklerf20217d2008-05-29 16:35:10 +08001895 struct ieee80211_tx_info *info = NULL;
1896 struct ieee80211_hdr *hdr = NULL;
Tomas Winklere7d326a2008-06-12 09:47:11 +08001897 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
Tomas Winkler25a65722008-06-12 09:47:07 +08001898 int i, sh, idx;
Tomas Winklerf20217d2008-05-29 16:35:10 +08001899 u16 seq;
Tomas Winklerf20217d2008-05-29 16:35:10 +08001900 if (agg->wait_for_ba)
Tomas Winklere1623442009-01-27 14:27:56 -08001901 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
Tomas Winklerf20217d2008-05-29 16:35:10 +08001902
1903 agg->frame_count = tx_resp->frame_count;
1904 agg->start_idx = start_idx;
Tomas Winklere7d326a2008-06-12 09:47:11 +08001905 agg->rate_n_flags = rate_n_flags;
Tomas Winklerf20217d2008-05-29 16:35:10 +08001906 agg->bitmap = 0;
1907
Tomas Winkler3fd07a12008-10-23 23:48:49 -07001908 /* num frames attempted by Tx command */
Tomas Winklerf20217d2008-05-29 16:35:10 +08001909 if (agg->frame_count == 1) {
1910 /* Only one frame was attempted; no block-ack will arrive */
1911 status = le16_to_cpu(frame_status[0].status);
Tomas Winkler25a65722008-06-12 09:47:07 +08001912 idx = start_idx;
Tomas Winklerf20217d2008-05-29 16:35:10 +08001913
1914 /* FIXME: code repetition */
Tomas Winklere1623442009-01-27 14:27:56 -08001915 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08001916 agg->frame_count, agg->start_idx, idx);
1917
Johannes Bergff0d91c2010-05-17 02:37:34 -07001918 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
Johannes Berge6a98542008-10-21 12:40:02 +02001919 info->status.rates[0].count = tx_resp->failure_frame + 1;
Tomas Winklerf20217d2008-05-29 16:35:10 +08001920 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
Johannes Bergc397bf12009-11-13 11:56:35 -08001921 info->flags |= iwl_tx_status_to_mac80211(status);
Wey-Yi Guy8d801082010-03-17 13:34:36 -07001922 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
Tomas Winklerf20217d2008-05-29 16:35:10 +08001923 /* FIXME: code repetition end */
1924
Tomas Winklere1623442009-01-27 14:27:56 -08001925 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08001926 status & 0xff, tx_resp->failure_frame);
Tomas Winklere1623442009-01-27 14:27:56 -08001927 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
Tomas Winklerf20217d2008-05-29 16:35:10 +08001928
1929 agg->wait_for_ba = 0;
1930 } else {
1931 /* Two or more frames were attempted; expect block-ack */
1932 u64 bitmap = 0;
1933 int start = agg->start_idx;
1934
1935 /* Construct bit-map of pending frames within Tx window */
1936 for (i = 0; i < agg->frame_count; i++) {
1937 u16 sc;
1938 status = le16_to_cpu(frame_status[i].status);
1939 seq = le16_to_cpu(frame_status[i].sequence);
1940 idx = SEQ_TO_INDEX(seq);
1941 txq_id = SEQ_TO_QUEUE(seq);
1942
1943 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1944 AGG_TX_STATE_ABORT_MSK))
1945 continue;
1946
Tomas Winklere1623442009-01-27 14:27:56 -08001947 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08001948 agg->frame_count, txq_id, idx);
1949
1950 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
Stanislaw Gruszka6c6a22e2009-09-23 10:51:34 +02001951 if (!hdr) {
1952 IWL_ERR(priv,
1953 "BUG_ON idx doesn't point to valid skb"
1954 " idx=%d, txq_id=%d\n", idx, txq_id);
1955 return -1;
1956 }
Tomas Winklerf20217d2008-05-29 16:35:10 +08001957
1958 sc = le16_to_cpu(hdr->seq_ctrl);
1959 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08001960 IWL_ERR(priv,
1961 "BUG_ON idx doesn't match seq control"
1962 " idx=%d, seq_idx=%d, seq=%d\n",
1963 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
Tomas Winklerf20217d2008-05-29 16:35:10 +08001964 return -1;
1965 }
1966
Tomas Winklere1623442009-01-27 14:27:56 -08001967 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08001968 i, idx, SEQ_TO_SN(sc));
1969
1970 sh = idx - start;
1971 if (sh > 64) {
1972 sh = (start - idx) + 0xff;
1973 bitmap = bitmap << sh;
1974 sh = 0;
1975 start = idx;
1976 } else if (sh < -64)
1977 sh = 0xff - (start - idx);
1978 else if (sh < 0) {
1979 sh = start - idx;
1980 start = idx;
1981 bitmap = bitmap << sh;
1982 sh = 0;
1983 }
Emmanuel Grumbach4aa41f12008-07-18 13:53:09 +08001984 bitmap |= 1ULL << sh;
Tomas Winklere1623442009-01-27 14:27:56 -08001985 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
Emmanuel Grumbach4aa41f12008-07-18 13:53:09 +08001986 start, (unsigned long long)bitmap);
Tomas Winklerf20217d2008-05-29 16:35:10 +08001987 }
1988
1989 agg->bitmap = bitmap;
1990 agg->start_idx = start;
Tomas Winklere1623442009-01-27 14:27:56 -08001991 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08001992 agg->frame_count, agg->start_idx,
1993 (unsigned long long)agg->bitmap);
1994
1995 if (bitmap)
1996 agg->wait_for_ba = 1;
1997 }
1998 return 0;
1999}
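/*
 * Sketch of the bitmap bookkeeping above using invented indices: if the
 * aggregate starts at start_idx = 10 and the per-frame status entries
 * report idx = 10, 11 and 13, the loop computes sh = 0, 1 and 3 and
 * agg->bitmap ends up as 0xb (binary 1011), one bit per pending frame
 * relative to the start of the Tx window; the block-ack handling code
 * can later compare this against the bitmap reported back by the uCode.
 */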
Tomas Winklerf20217d2008-05-29 16:35:10 +08002000
Johannes Bergc1182742010-04-30 11:30:48 -07002001static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
2002{
2003 int i;
2004 int start = 0;
2005 int ret = IWL_INVALID_STATION;
2006 unsigned long flags;
2007
2008 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
2009 (priv->iw_mode == NL80211_IFTYPE_AP))
2010 start = IWL_STA_ID;
2011
2012 if (is_broadcast_ether_addr(addr))
Johannes Berga194e322010-08-27 08:53:46 -07002013 return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
Johannes Bergc1182742010-04-30 11:30:48 -07002014
2015 spin_lock_irqsave(&priv->sta_lock, flags);
2016 for (i = start; i < priv->hw_params.max_stations; i++)
2017 if (priv->stations[i].used &&
2018 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
2019 addr))) {
2020 ret = i;
2021 goto out;
2022 }
2023
2024 IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
2025 addr, priv->num_stations);
2026
2027 out:
2028 /*
 2029	 * It is possible that more commands interacting with stations
 2030	 * arrive before we have completed processing the addition of
 2031	 * the station.
2032 */
2033 if (ret != IWL_INVALID_STATION &&
2034 (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
2035 ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
2036 (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
2037 IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
2038 ret);
2039 ret = IWL_INVALID_STATION;
2040 }
2041 spin_unlock_irqrestore(&priv->sta_lock, flags);
2042 return ret;
2043}
2044
Johannes Berg93286db2010-04-29 04:43:03 -07002045static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2046{
2047 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
2048 return IWL_AP_ID;
2049 } else {
2050 u8 *da = ieee80211_get_DA(hdr);
2051 return iwl_find_station(priv, da);
2052 }
2053}
2054
Tomas Winklerf20217d2008-05-29 16:35:10 +08002055/**
2056 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
2057 */
2058static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2059 struct iwl_rx_mem_buffer *rxb)
2060{
Zhu Yi2f301222009-10-09 17:19:45 +08002061 struct iwl_rx_packet *pkt = rxb_addr(rxb);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002062 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2063 int txq_id = SEQ_TO_QUEUE(sequence);
2064 int index = SEQ_TO_INDEX(sequence);
2065 struct iwl_tx_queue *txq = &priv->txq[txq_id];
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002066 struct ieee80211_hdr *hdr;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002067 struct ieee80211_tx_info *info;
2068 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
Tomas Winkler25a65722008-06-12 09:47:07 +08002069 u32 status = le32_to_cpu(tx_resp->u.status);
Dan Carpenter39825f42010-01-09 11:41:48 +03002070 int uninitialized_var(tid);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002071 int sta_id;
2072 int freed;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002073 u8 *qc = NULL;
Reinette Chatre9c5ac092010-05-05 02:26:06 -07002074 unsigned long flags;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002075
2076 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08002077 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
Tomas Winklerf20217d2008-05-29 16:35:10 +08002078 "is out of range [0-%d] %d %d\n", txq_id,
2079 index, txq->q.n_bd, txq->q.write_ptr,
2080 txq->q.read_ptr);
2081 return;
2082 }
2083
Johannes Bergff0d91c2010-05-17 02:37:34 -07002084 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002085 memset(&info->status, 0, sizeof(info->status));
2086
Tomas Winklerf20217d2008-05-29 16:35:10 +08002087 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002088 if (ieee80211_is_data_qos(hdr->frame_control)) {
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002089 qc = ieee80211_get_qos_ctl(hdr);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002090 tid = qc[0] & 0xf;
2091 }
2092
2093 sta_id = iwl_get_ra_sta_id(priv, hdr);
2094 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08002095 IWL_ERR(priv, "Station not known\n");
Tomas Winklerf20217d2008-05-29 16:35:10 +08002096 return;
2097 }
2098
Reinette Chatre9c5ac092010-05-05 02:26:06 -07002099 spin_lock_irqsave(&priv->sta_lock, flags);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002100 if (txq->sched_retry) {
2101 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2102 struct iwl_ht_agg *agg = NULL;
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002103 WARN_ON(!qc);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002104
2105 agg = &priv->stations[sta_id].tid[tid].agg;
2106
Tomas Winkler25a65722008-06-12 09:47:07 +08002107 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002108
Ron Rindjunsky32354272008-07-01 10:44:51 +03002109 /* check if BAR is needed */
2110 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
2111 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002112
2113 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
Tomas Winklerf20217d2008-05-29 16:35:10 +08002114 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
Tomas Winklere1623442009-01-27 14:27:56 -08002115 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
Tomas Winklerf20217d2008-05-29 16:35:10 +08002116 "%d index %d\n", scd_ssn , index);
Wey-Yi Guy74bcdb32010-03-17 13:34:34 -07002117 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
Wey-Yi Guyece64442010-04-08 13:17:37 -07002118 if (qc)
2119 iwl_free_tfds_in_queue(priv, sta_id,
2120 tid, freed);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002121
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002122 if (priv->mac80211_registered &&
2123 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
2124 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
Tomas Winklerf20217d2008-05-29 16:35:10 +08002125 if (agg->state == IWL_AGG_OFF)
Johannes Berge4e72fb2009-03-23 17:28:42 +01002126 iwl_wake_queue(priv, txq_id);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002127 else
Johannes Berge4e72fb2009-03-23 17:28:42 +01002128 iwl_wake_queue(priv, txq->swq_id);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002129 }
Tomas Winklerf20217d2008-05-29 16:35:10 +08002130 }
2131 } else {
Johannes Berge6a98542008-10-21 12:40:02 +02002132 info->status.rates[0].count = tx_resp->failure_frame + 1;
Johannes Bergc397bf12009-11-13 11:56:35 -08002133 info->flags |= iwl_tx_status_to_mac80211(status);
Wey-Yi Guy8d801082010-03-17 13:34:36 -07002134 iwlagn_hwrate_to_tx_control(priv,
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002135 le32_to_cpu(tx_resp->rate_n_flags),
2136 info);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002137
Tomas Winklere1623442009-01-27 14:27:56 -08002138 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002139 "rate_n_flags 0x%x retries %d\n",
2140 txq_id,
2141 iwl_get_tx_fail_reason(status), status,
2142 le32_to_cpu(tx_resp->rate_n_flags),
2143 tx_resp->failure_frame);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002144
Wey-Yi Guy74bcdb32010-03-17 13:34:34 -07002145 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
Wey-Yi Guyece64442010-04-08 13:17:37 -07002146 if (qc && likely(sta_id != IWL_INVALID_STATION))
2147 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2148 else if (sta_id == IWL_INVALID_STATION)
2149 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002150
2151 if (priv->mac80211_registered &&
2152 (iwl_queue_space(&txq->q) > txq->q.low_mark))
Johannes Berge4e72fb2009-03-23 17:28:42 +01002153 iwl_wake_queue(priv, txq_id);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002154 }
Wey-Yi Guyece64442010-04-08 13:17:37 -07002155 if (qc && likely(sta_id != IWL_INVALID_STATION))
John W. Linville1805a342010-04-09 13:42:26 -04002156 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002157
Wey-Yi Guy04569cb2010-03-31 17:57:28 -07002158 iwl_check_abort_status(priv, tx_resp->frame_count, status);
Reinette Chatre9c5ac092010-05-05 02:26:06 -07002159
2160 spin_unlock_irqrestore(&priv->sta_lock, flags);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002161}
2162
Tomas Winklercaab8f12008-08-04 16:00:42 +08002163static int iwl4965_calc_rssi(struct iwl_priv *priv,
2164 struct iwl_rx_phy_res *rx_resp)
2165{
 2166	/* Data from PHY/DSP regarding signal strength, etc.;
 2167	 * contents are always present, not configurable by the host. */
2168 struct iwl4965_rx_non_cfg_phy *ncphy =
2169 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
2170 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
2171 >> IWL49_AGC_DB_POS;
2172
2173 u32 valid_antennae =
2174 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
2175 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
2176 u8 max_rssi = 0;
2177 u32 i;
2178
2179 /* Find max rssi among 3 possible receivers.
2180 * These values are measured by the digital signal processor (DSP).
2181 * They should stay fairly constant even as the signal strength varies,
2182 * if the radio's automatic gain control (AGC) is working right.
2183 * AGC value (see below) will provide the "interesting" info. */
2184 for (i = 0; i < 3; i++)
2185 if (valid_antennae & (1 << i))
2186 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2187
Tomas Winklere1623442009-01-27 14:27:56 -08002188 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
Tomas Winklercaab8f12008-08-04 16:00:42 +08002189 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2190 max_rssi, agc);
2191
2192 /* dBm = max_rssi dB - agc dB - constant.
2193 * Higher AGC (higher radio gain) means lower signal. */
Wey-Yi Guyb744cb72010-03-23 11:37:59 -07002194 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
Tomas Winklercaab8f12008-08-04 16:00:42 +08002195}
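/*
 * Example with invented measurements: if the strongest of the three DSP
 * rssi_info values is 97 and the AGC reading is 32, the function returns
 * 97 - 32 - IWLAGN_RSSI_OFFSET dBm; a higher AGC value (more radio gain
 * applied) therefore lowers the reported signal, as the comment above
 * notes.
 */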
2196
Tomas Winklerf20217d2008-05-29 16:35:10 +08002197
Zhu Yib481de92007-09-25 17:54:57 -07002198/* Set up 4965-specific Rx frame reply handlers */
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002199static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002200{
2201 /* Legacy Rx frames */
Wey-Yi Guy8d801082010-03-17 13:34:36 -07002202 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
Ron Rindjunsky37a44212008-05-29 16:35:18 +08002203 /* Tx response */
Tomas Winklerf20217d2008-05-29 16:35:10 +08002204 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
Zhu Yib481de92007-09-25 17:54:57 -07002205}
2206
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002207static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002208{
2209 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
Zhu Yib481de92007-09-25 17:54:57 -07002210}
2211
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002212static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002213{
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002214 cancel_work_sync(&priv->txpower_work);
Zhu Yib481de92007-09-25 17:54:57 -07002215}
2216
Tomas Winkler3c424c22008-04-15 16:01:42 -07002217static struct iwl_hcmd_ops iwl4965_hcmd = {
Tomas Winkler7e8c5192008-04-15 16:01:43 -07002218 .rxon_assoc = iwl4965_send_rxon_assoc,
Abhijeet Kolekare0158e62009-04-08 11:26:37 -07002219 .commit_rxon = iwl_commit_rxon,
Abhijeet Kolekar45823532009-04-08 11:26:44 -07002220 .set_rxon_chain = iwl_set_rxon_chain,
Johannes Berg65b52bd2010-04-13 01:04:31 -07002221 .send_bt_config = iwl_send_bt_config,
Tomas Winkler3c424c22008-04-15 16:01:42 -07002222};
2223
Tomas Winkler857485c2008-03-21 13:53:44 -07002224static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08002225 .get_hcmd_size = iwl4965_get_hcmd_size,
Tomas Winkler133636d2008-05-05 10:22:34 +08002226 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07002227 .chain_noise_reset = iwl4965_chain_noise_reset,
2228 .gain_computation = iwl4965_gain_computation,
Johannes Berg94597ab2010-08-09 10:57:02 -07002229 .tx_cmd_protection = iwlcore_tx_cmd_protection,
Tomas Winklercaab8f12008-08-04 16:00:42 +08002230 .calc_rssi = iwl4965_calc_rssi,
Johannes Bergb6e4c552010-04-06 04:12:42 -07002231 .request_scan = iwlagn_request_scan,
Tomas Winkler857485c2008-03-21 13:53:44 -07002232};
2233
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002234static struct iwl_lib_ops iwl4965_lib = {
Tomas Winkler5425e492008-04-15 16:01:38 -07002235 .set_hw_params = iwl4965_hw_set_hw_params,
Tomas Winklere2a722e2008-04-14 21:16:10 -07002236 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
Tomas Winklerda1bc452008-05-29 16:35:00 +08002237 .txq_set_sched = iwl4965_txq_set_sched,
Tomas Winkler30e553e2008-05-29 16:35:16 +08002238 .txq_agg_enable = iwl4965_txq_agg_enable,
2239 .txq_agg_disable = iwl4965_txq_agg_disable,
Samuel Ortiz7aaa1d72009-01-19 15:30:26 -08002240 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
2241 .txq_free_tfd = iwl_hw_txq_free_tfd,
Samuel Ortiza8e74e22009-01-23 13:45:14 -08002242 .txq_init = iwl_hw_tx_queue_init,
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002243 .rx_handler_setup = iwl4965_rx_handler_setup,
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002244 .setup_deferred_work = iwl4965_setup_deferred_work,
2245 .cancel_deferred_work = iwl4965_cancel_deferred_work,
Tomas Winkler57aab752008-04-14 21:16:03 -07002246 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2247 .alive_notify = iwl4965_alive_notify,
Emmanuel Grumbachf3ccc082008-05-05 10:22:45 +08002248 .init_alive_start = iwl4965_init_alive_start,
Tomas Winkler57aab752008-04-14 21:16:03 -07002249 .load_ucode = iwl4965_load_bsm,
Reinette Chatreb7a79402009-09-25 14:24:23 -07002250 .dump_nic_event_log = iwl_dump_nic_event_log,
2251 .dump_nic_error_log = iwl_dump_nic_error_log,
Ben Cahill647291f2010-03-02 12:48:25 -08002252 .dump_fh = iwl_dump_fh,
Wey-Yi Guy4a56e962009-10-23 13:42:29 -07002253 .set_channel_switch = iwl4965_hw_channel_switch,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07002254 .apm_ops = {
Ben Cahillfadb3582009-10-23 13:42:21 -07002255 .init = iwl_apm_init,
Abhijeet Kolekard68b6032009-10-02 13:44:04 -07002256 .stop = iwl_apm_stop,
Tomas Winkler694cc562008-04-24 11:55:22 -07002257 .config = iwl4965_nic_config,
Emmanuel Grumbach5b9f8cd2008-10-29 14:05:46 -07002258 .set_pwr_src = iwl_set_pwr_src,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07002259 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002260 .eeprom_ops = {
Tomas Winkler073d3f52008-04-21 15:41:52 -07002261 .regulatory_bands = {
2262 EEPROM_REGULATORY_BAND_1_CHANNELS,
2263 EEPROM_REGULATORY_BAND_2_CHANNELS,
2264 EEPROM_REGULATORY_BAND_3_CHANNELS,
2265 EEPROM_REGULATORY_BAND_4_CHANNELS,
2266 EEPROM_REGULATORY_BAND_5_CHANNELS,
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07002267 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
2268 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
Tomas Winkler073d3f52008-04-21 15:41:52 -07002269 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002270 .verify_signature = iwlcore_eeprom_verify_signature,
2271 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
2272 .release_semaphore = iwlcore_eeprom_release_semaphore,
Tomas Winkler0ef2ca62008-10-23 23:48:51 -07002273 .calib_version = iwl4965_eeprom_calib_version,
Tomas Winkler073d3f52008-04-21 15:41:52 -07002274 .query_addr = iwlcore_eeprom_query_addr,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002275 },
Tomas Winkler630fe9b2008-06-12 09:47:08 +08002276 .send_tx_power = iwl4965_send_tx_power,
Emmanuel Grumbach5b9f8cd2008-10-29 14:05:46 -07002277 .update_chain_flags = iwl_update_chain_flags,
Abhijeet Kolekar5bbe2332009-04-08 11:26:35 -07002278 .post_associate = iwl_post_associate,
Abhijeet Kolekar60690a62009-04-08 11:26:49 -07002279 .config_ap = iwl_config_ap,
Mohamed Abbasef850d72009-05-22 11:01:50 -07002280 .isr = iwl_isr_legacy,
Wey-Yi Guy62161ae2009-05-21 13:44:23 -07002281 .temp_ops = {
2282 .temperature = iwl4965_temperature_calib,
2283 .set_ct_kill = iwl4965_set_ct_threshold,
2284 },
Johannes Berg1fa61b22010-04-28 08:44:52 -07002285 .manage_ibss_station = iwlagn_manage_ibss_station,
Johannes Berga194e322010-08-27 08:53:46 -07002286 .update_bcast_stations = iwl_update_bcast_stations,
Abhijeet Kolekarb8c76262010-04-08 15:29:07 -07002287 .debugfs_ops = {
2288 .rx_stats_read = iwl_ucode_rx_stats_read,
2289 .tx_stats_read = iwl_ucode_tx_stats_read,
2290 .general_stats_read = iwl_ucode_general_stats_read,
Wey-Yi Guyffb7d892010-07-14 08:09:55 -07002291 .bt_stats_read = iwl_ucode_bt_stats_read,
Abhijeet Kolekarb8c76262010-04-08 15:29:07 -07002292 },
Wey-Yi Guyb807b8a2010-07-07 08:26:45 -07002293 .recover_from_tx_stall = iwl_bg_monitor_recover,
Wey-Yi Guyfa8f1302010-03-05 14:22:46 -08002294 .check_plcp_health = iwl_good_plcp_health,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002295};
2296
Emese Revfy45d5d802009-12-14 00:59:53 +01002297static const struct iwl_ops iwl4965_ops = {
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002298 .lib = &iwl4965_lib,
Tomas Winkler3c424c22008-04-15 16:01:42 -07002299 .hcmd = &iwl4965_hcmd,
Tomas Winkler857485c2008-03-21 13:53:44 -07002300 .utils = &iwl4965_hcmd_utils,
Johannes Berge932a602009-10-02 13:44:03 -07002301 .led = &iwlagn_led_ops,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002302};
2303
Ron Rindjunskyfed90172008-04-15 16:01:41 -07002304struct iwl_cfg iwl4965_agn_cfg = {
Shanyu Zhaoc11362c2010-03-05 17:05:20 -08002305 .name = "Intel(R) Wireless WiFi Link 4965AGN",
Reinette Chatrea0987a82008-12-02 12:14:06 -08002306 .fw_name_pre = IWL4965_FW_PRE,
2307 .ucode_api_max = IWL4965_UCODE_API_MAX,
2308 .ucode_api_min = IWL4965_UCODE_API_MIN,
Tomas Winkler82b9a122008-03-04 18:09:30 -08002309 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
Tomas Winkler073d3f52008-04-21 15:41:52 -07002310 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
Tomas Winkler0ef2ca62008-10-23 23:48:51 -07002311 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2312 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002313 .ops = &iwl4965_ops,
Wey-Yi Guy88804e22009-10-09 13:20:28 -07002314 .num_of_queues = IWL49_NUM_QUEUES,
2315 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
Wey-Yi Guy2b068612010-03-22 09:17:39 -07002316 .mod_params = &iwlagn_mod_params,
Wey-Yi Guy52aa0812009-10-23 13:42:24 -07002317 .valid_tx_ant = ANT_AB,
Shanyu Zhaob23aa882009-11-06 14:52:48 -08002318 .valid_rx_ant = ANT_ABC,
Ben Cahillfadb3582009-10-23 13:42:21 -07002319 .pll_cfg_val = 0,
2320 .set_l0s = true,
2321 .use_bsm = true,
Daniel C Halperinb2617932009-08-13 13:30:59 -07002322 .use_isr_legacy = true,
2323 .ht_greenfield_support = false,
Johannes Berg96d8c6a2009-09-11 10:50:37 -07002324 .broken_powersave = true,
Wey-Yi Guyf2d0d0e2009-09-11 10:38:14 -07002325 .led_compensation = 61,
Wey-Yi Guyd8c07e72009-09-25 14:24:26 -07002326 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
Trieu 'Andrew' Nguyen3e4fb5f2010-01-22 14:22:46 -08002327 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
Wey-Yi Guyce606592010-07-23 13:19:39 -07002328 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
Wey-Yi Guy2f3f7f92010-03-18 10:56:32 -07002329 .temperature_kelvin = true,
Wey-Yi Guy678b3852010-03-26 12:54:37 -07002330 .max_event_log_size = 512,
Wey-Yi Guy4e7033e2010-04-27 14:33:33 -07002331 .tx_power_by_driver = true,
Wey-Yi Guy6e5c8002010-04-27 14:00:28 -07002332 .ucode_tracing = true,
Wey-Yi Guy65d1f892010-04-25 15:41:43 -07002333 .sensitivity_calib_by_driver = true,
2334 .chain_noise_calib_by_driver = true,
Johannes Berge7cb4952010-04-13 01:04:35 -07002335 /*
2336 * Force use of chains B and C for scan RX on 5 GHz band
2337 * because the device has off-channel reception on chain A.
2338 */
Johannes Berg0e1654f2010-05-18 02:48:36 -07002339 .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
Tomas Winkler82b9a122008-03-04 18:09:30 -08002340};
2341
Tomas Winklerd16dc482008-07-11 11:53:38 +08002342/* Module firmware */
Reinette Chatrea0987a82008-12-02 12:14:06 -08002343MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
Tomas Winklerd16dc482008-07-11 11:53:38 +08002344