blob: 822169e61e9b0044fe245220a0dd36e8ddbd7bd4 [file] [log] [blame]
Zhu Yib481de92007-09-25 17:54:57 -07001/******************************************************************************
2 *
Reinette Chatreeb7ae892008-03-11 16:17:17 -07003 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
Zhu Yib481de92007-09-25 17:54:57 -07004 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
Zhu Yib481de92007-09-25 17:54:57 -070038#include <linux/etherdevice.h>
Zhu Yi12342c42007-12-20 11:27:32 +080039#include <asm/unaligned.h>
Zhu Yib481de92007-09-25 17:54:57 -070040
Assaf Krauss6bc913b2008-03-11 16:17:18 -070041#include "iwl-eeprom.h"
Zhu Yib481de92007-09-25 17:54:57 -070042#include "iwl-4965.h"
Tomas Winklerfee12472008-04-03 16:05:21 -070043#include "iwl-core.h"
Tomas Winkler3395f6e2008-03-25 16:33:37 -070044#include "iwl-io.h"
Zhu Yib481de92007-09-25 17:54:57 -070045#include "iwl-helpers.h"
46
/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
	.num_of_queues = IWL_MAX_NUM_QUEUES,	/* use all HW Tx queues by default */
	.enable_qos = 1,			/* 802.11e QoS on by default */
	.amsdu_size_8K = 1,			/* 8K Rx buffers to fit large A-MSDUs */
	/* the rest are 0 by default */
};
54
Tomas Winklerc79dd5b2008-03-12 16:58:50 -070055static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
Christoph Hellwig416e1432007-10-25 17:15:49 +080056
/*
 * Build one iwl4965_rate_info entry: legacy PLCP code, SISO/MIMO HT PLCP
 * codes, the IEEE rate value, and the prev/next rate indices used for
 * rate-scaling walks (separate chains for plain and TGG/OFDM ordering).
 */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO_##s##M_PLCP, \
				    IWL_RATE_##r##M_IEEE,   \
				    IWL_RATE_##ip##M_INDEX, \
				    IWL_RATE_##in##M_INDEX, \
				    IWL_RATE_##rp##M_INDEX, \
				    IWL_RATE_##rn##M_INDEX, \
				    IWL_RATE_##pp##M_INDEX, \
				    IWL_RATE_##np##M_INDEX }

/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
/* Rate table indexed by IWL_RATE_*M_INDEX; covers CCK 1-11 and OFDM 6-60. */
const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /* 1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /* 2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),          /* 6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),         /* 9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),    /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),    /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),    /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),    /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),    /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV), /* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV), /* 60mbps */
};
92
#ifdef CONFIG_IWL4965_HT

/*
 * Map each 802.11 TID (0-15, plus one extra entry) to a Tx DMA FIFO.
 * TIDs 0-7 map to the four AC FIFOs; TIDs 8-15 are unused (NONE).
 * The final AC3 entry is presumably for non-QoS/management traffic --
 * NOTE(review): confirm against the callers of this table.
 */
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

#endif /*CONFIG_IWL4965_HT */
116
/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this first read's result is immediately overwritten
	 * in the loop below; presumably kept for a register-read side effect
	 * -- confirm before removing. */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	/* Compare each SRAM word against the host copy of the boot image;
	 * caller must already hold NIC access. */
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
147
148/**
149 * iwl4965_load_bsm - Load bootstrap instructions
150 *
151 * BSM operation:
152 *
153 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
154 * in special SRAM that does not power down during RFKILL. When powering back
155 * up after power-saving sleeps (or during initial uCode load), the BSM loads
156 * the bootstrap program into the on-board processor, and starts it.
157 *
158 * The bootstrap program loads (via DMA) instructions and data for a new
159 * program from host DRAM locations indicated by the host driver in the
160 * BSM_DRAM_* registers. Once the new program is loaded, it starts
161 * automatically.
162 *
163 * When initializing the NIC, the host driver points the BSM to the
164 * "initialize" uCode image. This uCode sets up some internal data, then
165 * notifies host via "initialize alive" that it is complete.
166 *
167 * The host then replaces the BSM_DRAM_* pointer values to point to the
168 * normal runtime uCode instructions and a backup uCode data cache buffer
169 * (filled initially with starting data values for the on-board processor),
170 * then triggers the "initialize" uCode to load and launch the runtime uCode,
171 * which begins normal operation.
172 *
173 * When doing a power-save shutdown, runtime uCode saves data SRAM into
174 * the backup data cache in DRAM before SRAM is powered down.
175 *
176 * When powering back up, the BSM loads the bootstrap program. This reloads
177 * the runtime uCode instructions and the backup data cache into SRAM,
178 * and re-launches the runtime uCode from where it left off.
179 */
180static int iwl4965_load_bsm(struct iwl_priv *priv)
181{
182 __le32 *image = priv->ucode_boot.v_addr;
183 u32 len = priv->ucode_boot.len;
184 dma_addr_t pinst;
185 dma_addr_t pdata;
186 u32 inst_len;
187 u32 data_len;
188 int i;
189 u32 done;
190 u32 reg_offset;
191 int ret;
192
193 IWL_DEBUG_INFO("Begin load bsm\n");
194
195 /* make sure bootstrap program is no larger than BSM's SRAM size */
196 if (len > IWL_MAX_BSM_SIZE)
197 return -EINVAL;
198
199 /* Tell bootstrap uCode where to find the "Initialize" uCode
200 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
201 * NOTE: iwl4965_initialize_alive_start() will replace these values,
202 * after the "initialize" uCode has run, to point to
203 * runtime/protocol instructions and backup data cache. */
204 pinst = priv->ucode_init.p_addr >> 4;
205 pdata = priv->ucode_init_data.p_addr >> 4;
206 inst_len = priv->ucode_init.len;
207 data_len = priv->ucode_init_data.len;
208
209 ret = iwl_grab_nic_access(priv);
210 if (ret)
211 return ret;
212
213 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
214 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
215 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
216 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
217
218 /* Fill BSM memory with bootstrap instructions */
219 for (reg_offset = BSM_SRAM_LOWER_BOUND;
220 reg_offset < BSM_SRAM_LOWER_BOUND + len;
221 reg_offset += sizeof(u32), image++)
222 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
223
224 ret = iwl4965_verify_bsm(priv);
225 if (ret) {
226 iwl_release_nic_access(priv);
227 return ret;
228 }
229
230 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
231 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
232 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
233 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
234
235 /* Load bootstrap code into instruction SRAM now,
236 * to prepare to load "initialize" uCode */
237 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
238
239 /* Wait for load of bootstrap uCode to finish */
240 for (i = 0; i < 100; i++) {
241 done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
242 if (!(done & BSM_WR_CTRL_REG_BIT_START))
243 break;
244 udelay(10);
245 }
246 if (i < 100)
247 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
248 else {
249 IWL_ERROR("BSM write did not complete!\n");
250 return -EIO;
251 }
252
253 /* Enable future boot loads whenever power management unit triggers it
254 * (e.g. when powering back up after power-save shutdown) */
255 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
256
257 iwl_release_nic_access(priv);
258
259 return 0;
260}
261
/*
 * One-time driver-side initialization: locks, lists, default RXON/QoS
 * state, regulatory channel map, geo (band/rate) tables, and finally
 * mac80211 registration.  Returns 0 or a negative error; on failure the
 * already-allocated pieces are unwound via the goto chain at the bottom.
 */
static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;
	int i;

	priv->antenna = (enum iwl4965_antenna)priv->cfg->mod_params->antenna;
	priv->retry_rate = 1;
	priv->ibss_beacon = NULL;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->power_data.lock);
	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);
	spin_lock_init(&priv->lq_mngr.lock);

	for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	/* Clear the driver's (not device's) station table */
	iwlcore_clear_stations_table(priv);

	priv->data_retry_limit = -1;
	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = IEEE80211_IF_TYPE_STA;

	priv->use_ant_b_for_management_frame = 1; /* start with ant B */
	priv->valid_antenna = 0x7;	/* assume all 3 connected */
	priv->ps_mode = IWL_MIMO_PS_NONE;

	/* Choose which receivers/antennas to use */
	iwl4965_set_rxon_chain(priv);

	iwlcore_reset_qos(priv);

	priv->qos_data.qos_active = 0;
	priv->qos_data.qos_cap.val = 0;

	/* Default to 2.4 GHz channel 6 until a real channel is configured */
	iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);

	priv->rates_mask = IWL_RATES_MASK;
	/* If power management is turned on, default to AC mode */
	priv->power_mode = IWL_POWER_AC;
	priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;

	ret = iwl_init_channel_map(priv);
	if (ret) {
		IWL_ERROR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl4965_init_geos(priv);
	if (ret) {
		IWL_ERROR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERROR("Failed to register network device (error %d)\n",
			  ret);
		goto err_free_geos;
	}

	priv->hw->conf.beacon_int = 100;
	priv->mac80211_registered = 1;

	return 0;

err_free_geos:
	iwl4965_free_geos(priv);
err_free_channel_map:
	iwl_free_channel_map(priv);
err:
	return ret;
}
344
Zhu Yib481de92007-09-25 17:54:57 -0700345static int is_fat_channel(__le32 rxon_flags)
346{
347 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
348 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
349}
350
/*
 * Return 1 when only a single spatial stream can be used: HT disabled,
 * the peer advertises no MIMO MCS set (supp_mcs_set[1] == 0), or static
 * MIMO power save is active.  Without CONFIG_IWL4965_HT this is always 1.
 */
static u8 is_single_stream(struct iwl_priv *priv)
{
#ifdef CONFIG_IWL4965_HT
	if (!priv->current_ht_config.is_ht ||
	    (priv->current_ht_config.supp_mcs_set[1] == 0) ||
	    (priv->ps_mode == IWL_MIMO_PS_STATIC))
		return 1;
#else
	return 1;
#endif	/*CONFIG_IWL4965_HT */
	/* reached only in HT builds when dual-stream is possible */
	return 0;
}
363
/*
 * Convert a 4965 hardware rate_n_flags word to a driver rate-table index.
 *
 * HT rates: the low byte is a PLCP/MCS code; MIMO codes are folded onto
 * the SISO range, then offset into the OFDM part of the table, skipping
 * the 9M slot (no HT equivalent).  Legacy rates: linear search of
 * iwl4965_rates by PLCP code.  Returns the index, or -1 if no match.
 */
int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* 4965 HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		/* fold MIMO PLCP codes onto the SISO range */
		if (idx >= IWL_RATE_MIMO_6M_PLCP)
			idx = idx - IWL_RATE_MIMO_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* 4965 legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
			if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}
391
Ron Rindjunsky4c424e42008-03-04 18:09:27 -0800392/**
393 * translate ucode response to mac80211 tx status control values
394 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700395void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
Ron Rindjunsky4c424e42008-03-04 18:09:27 -0800396 struct ieee80211_tx_control *control)
397{
398 int rate_index;
399
400 control->antenna_sel_tx =
401 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_A_POS);
402 if (rate_n_flags & RATE_MCS_HT_MSK)
403 control->flags |= IEEE80211_TXCTL_OFDM_HT;
404 if (rate_n_flags & RATE_MCS_GF_MSK)
405 control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
406 if (rate_n_flags & RATE_MCS_FAT_MSK)
407 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
408 if (rate_n_flags & RATE_MCS_DUP_MSK)
409 control->flags |= IEEE80211_TXCTL_DUP_DATA;
410 if (rate_n_flags & RATE_MCS_SGI_MSK)
411 control->flags |= IEEE80211_TXCTL_SHORT_GI;
412 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
413 * IEEE80211_BAND_2GHZ band as it contains all the rates */
414 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
415 if (rate_index == -1)
416 control->tx_rate = NULL;
417 else
418 control->tx_rate =
419 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
420}
Tomas Winkler17744ff2008-03-02 01:52:00 +0200421
Zhu Yib481de92007-09-25 17:54:57 -0700422/*
423 * Determine how many receiver/antenna chains to use.
424 * More provides better reception via diversity. Fewer saves power.
425 * MIMO (dual stream) requires at least 2, but works better with 3.
426 * This does not determine *which* chains to use, just how many.
427 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700428static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -0700429 u8 *idle_state, u8 *rx_state)
430{
431 u8 is_single = is_single_stream(priv);
432 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
433
434 /* # of Rx chains to use when expecting MIMO. */
435 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
436 *rx_state = 2;
437 else
438 *rx_state = 3;
439
440 /* # Rx chains when idling and maybe trying to save power */
441 switch (priv->ps_mode) {
442 case IWL_MIMO_PS_STATIC:
443 case IWL_MIMO_PS_DYNAMIC:
444 *idle_state = (is_cam) ? 2 : 1;
445 break;
446 case IWL_MIMO_PS_NONE:
447 *idle_state = (is_cam) ? *rx_state : 1;
448 break;
449 default:
450 *idle_state = 1;
451 break;
452 }
453
454 return 0;
455}
456
/*
 * Stop the Rx DMA channel: clear the channel-0 config register and poll
 * (up to 1 ms) for the FH Rx-status idle bit.  A poll timeout is only
 * logged; the function still returns 0.  Takes priv->lock and NIC access
 * for the duration of the register writes.
 */
int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* bit 24 is presumably the channel-0 idle flag -- confirm against
	 * the FH register layout in iwl-4965-hw.h */
	rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				 (1 << 24), 1000);
	if (rc < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
481
/*
 * Find the driver station-table index for a MAC address.
 *
 * Broadcast addresses short-circuit to the broadcast station id.  In
 * IBSS/AP mode the search starts at IWL_STA_ID so the AP/BSSID entries
 * below it are skipped.  Returns the index, or IWL_INVALID_STATION if
 * the address is not in the table.  Takes priv->sta_lock for the scan.
 */
u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IWL_INVALID_STATION;
	unsigned long flags;
	DECLARE_MAC_BUF(mac);

	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
	    (priv->iw_mode == IEEE80211_IF_TYPE_AP))
		start = IWL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return priv->hw_setting.bcast_sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	for (i = start; i < priv->hw_setting.max_stations; i++)
		if ((priv->stations[i].used) &&
		    (!compare_ether_addr
		     (priv->stations[i].sta.sta.addr, addr))) {
			ret = i;
			goto out;
		}

	IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
			      print_mac(mac, addr), priv->num_stations);

 out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
513
/*
 * Select the NIC power source.  pwr_max selects main power (VMAIN);
 * otherwise auxiliary power (VAUX) is selected when the device reports
 * PME-from-D3cold support in PCI config space.
 *
 * NOTE(review): if pci_read_config_dword() fails, 'val' is read while
 * uninitialized and its error code becomes the return value even though
 * execution continues -- confirm whether that path can occur in practice.
 */
static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (!pwr_max) {
		u32 val;

		ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
					    &val);

		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);
	} else
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				~APMG_PS_CTRL_MSK_PWR_SRC);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
546
/*
 * Program the Rx DMA channel: stop it, point the hardware at the RBD
 * circular buffer and the shared Rx-status area in DRAM, then re-enable
 * it with the configured buffer size (4K, or 8K when amsdu_size_8K is
 * set).  Takes priv->lock and NIC access for the register sequence.
 */
static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
{
	int rc;
	unsigned long flags;
	unsigned int rb_size;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   (priv->hw_setting.shared_phys +
			    offsetof(struct iwl4965_shared, val0)) >> 4);

	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   /*0x10 << 4 | */
			   (RX_QUEUE_SIZE_LOG <<
			    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));

	/*
	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
	 */

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
598
/* Tell 4965 where to find the "keep warm" buffer */
static int iwl4965_kw_init(struct iwl_priv *priv)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc)
		goto out;

	/* hardware takes the DMA address shifted right by 4 (16-byte units) */
	iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
			   priv->kw.dma_addr >> 4);
	iwl_release_nic_access(priv);
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return rc;
}
617
/*
 * Allocate the DMA-coherent "keep warm" buffer used by the device.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int iwl4965_kw_alloc(struct iwl_priv *priv)
{
	struct pci_dev *dev = priv->pci_dev;
	struct iwl4965_kw *kw = &priv->kw;

	kw->size = IWL4965_KW_SIZE;	/* TBW need set somewhere else */
	kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
	if (!kw->v_addr)
		return -ENOMEM;

	return 0;
}
630
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800631/**
632 * iwl4965_kw_free - Free the "keep warm" buffer
633 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700634static void iwl4965_kw_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700635{
636 struct pci_dev *dev = priv->pci_dev;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800637 struct iwl4965_kw *kw = &priv->kw;
Zhu Yib481de92007-09-25 17:54:57 -0700638
639 if (kw->v_addr) {
640 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
641 memset(kw, 0, sizeof(*kw));
642 }
643}
644
645/**
646 * iwl4965_txq_ctx_reset - Reset TX queue context
647 * Destroys all DMA structures and initialise them again
648 *
649 * @param priv
650 * @return error code
651 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700652static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700653{
654 int rc = 0;
655 int txq_id, slots_num;
656 unsigned long flags;
657
658 iwl4965_kw_free(priv);
659
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800660 /* Free all tx/cmd queues and keep-warm buffer */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800661 iwl4965_hw_txq_ctx_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700662
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800663 /* Alloc keep-warm buffer */
Zhu Yib481de92007-09-25 17:54:57 -0700664 rc = iwl4965_kw_alloc(priv);
665 if (rc) {
666 IWL_ERROR("Keep Warm allocation failed");
667 goto error_kw;
668 }
669
670 spin_lock_irqsave(&priv->lock, flags);
671
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700672 rc = iwl_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700673 if (unlikely(rc)) {
674 IWL_ERROR("TX reset failed");
675 spin_unlock_irqrestore(&priv->lock, flags);
676 goto error_reset;
677 }
678
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800679 /* Turn off all Tx DMA channels */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700680 iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700681 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700682 spin_unlock_irqrestore(&priv->lock, flags);
683
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800684 /* Tell 4965 where to find the keep-warm buffer */
Zhu Yib481de92007-09-25 17:54:57 -0700685 rc = iwl4965_kw_init(priv);
686 if (rc) {
687 IWL_ERROR("kw_init failed\n");
688 goto error_reset;
689 }
690
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800691 /* Alloc and init all (default 16) Tx queues,
692 * including the command queue (#4) */
Zhu Yib481de92007-09-25 17:54:57 -0700693 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
694 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
695 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800696 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
Zhu Yib481de92007-09-25 17:54:57 -0700697 txq_id);
698 if (rc) {
699 IWL_ERROR("Tx %d queue init failed\n", txq_id);
700 goto error;
701 }
702 }
703
704 return rc;
705
706 error:
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800707 iwl4965_hw_txq_ctx_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700708 error_reset:
709 iwl4965_kw_free(priv);
710 error_kw:
711 return rc;
712}
713
/*
 * Bring the NIC hardware up: clock/power init, PCI quirk handling,
 * EEPROM version check, CSR configuration, then Rx queue allocation /
 * reset and Tx queue context reset.  Sets STATUS_INIT on success.
 * The register sequence and delays follow the 4965 bring-up procedure;
 * do not reorder.
 */
int iwl4965_hw_nic_init(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;
	struct iwl4965_rx_queue *rxq = &priv->rxq;
	u8 rev_id;
	u32 val;
	u8 val_link;

	iwl4965_power_init_handle(priv);

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* request MAC clock and wait (up to 25 ms) for it to become ready */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	iwl_read_prph(priv, APMG_CLK_CTRL_REG);

	/* enable DMA and BSM clock requests */
	iwl_write_prph(priv, APMG_CLK_CTRL_REG,
		       APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	iwl_read_prph(priv, APMG_CLK_CTRL_REG);

	udelay(20);

	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);
	iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Determine HW type */
	rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
	if (rc)
		return rc;

	IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);

	iwl4965_nic_set_pwr_src(priv, 1);
	spin_lock_irqsave(&priv->lock, flags);

	/* hardware-revision-specific PCI quirk for early steppings */
	if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* refuse to run with EEPROM calibration data we can't interpret */
	if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
		IWL_ERROR("Older EEPROM detected!  Aborting.\n");
		return -EINVAL;
	}

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* disable L1 entry -- workaround for pre-B1 */
	pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);

	spin_lock_irqsave(&priv->lock, flags);

	/* set CSR_HW_CONFIG_REG for uCode use */

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR49_HW_IF_CONFIG_REG_BIT_4965_R |
		    CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);

	rc = iwl_grab_nic_access(priv);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	/* pulse the power-supply reset request */
	iwl_read_prph(priv, APMG_PS_CTRL_REG);
	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
	udelay(5);
	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_hw_card_show_info(priv);

	/* end nic_init */

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = iwl4965_rx_queue_alloc(priv);
		if (rc) {
			IWL_ERROR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl4965_rx_queue_reset(priv, rxq);

	iwl4965_rx_replenish(priv);

	iwl4965_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl4965_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	rc = iwl4965_txq_ctx_reset(priv);
	if (rc)
		return rc;

	if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");

	if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
855
/**
 * iwl4965_hw_nic_stop_master - Stop the NIC's bus-master (DMA) activity
 *
 * Sets CSR_RESET_REG_FLAG_STOP_MASTER, then polls for the master-disabled
 * acknowledgement unless the MAC is in power-save (in which case the master
 * is already stopped and no poll is needed).
 *
 * Returns 0 on success, or the negative poll timeout result from
 * iwl_poll_bit() if the hardware never reports master-disabled.
 */
int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
{
	int rc = 0;
	u32 reg_val;
	unsigned long flags;

	/* CSR accesses below are serialized against other register users */
	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	reg_val = iwl_read32(priv, CSR_GP_CNTRL);

	if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
	    (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
		/* In power save the MAC clock is gated; the master is
		 * effectively stopped already, so skip the poll below. */
		IWL_DEBUG_INFO("Card in power save, master is already "
			       "stopped\n");
	else {
		/* Wait up to 100 usec for the hardware to acknowledge */
		rc = iwl_poll_bit(priv, CSR_RESET,
				  CSR_RESET_REG_FLAG_MASTER_DISABLED,
				  CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
		if (rc < 0) {
			spin_unlock_irqrestore(&priv->lock, flags);
			return rc;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO("stop master\n");

	return rc;
}
888
/**
 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 *
 * For each Tx queue: writes 0 to the channel's FH Tx config register to halt
 * DMA, then polls (up to 200 usec) for the channel-idle status bit.  Finally
 * releases all driver memory for the Tx queues.  Channels whose NIC access
 * grab fails are skipped (best effort); the memory is freed regardless.
 */
void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
{

	int txq_id;
	unsigned long flags;

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
		spin_lock_irqsave(&priv->lock, flags);
		if (iwl_grab_nic_access(priv)) {
			/* Can't wake the NIC for this channel -- skip it
			 * rather than touching registers we can't reach. */
			spin_unlock_irqrestore(&priv->lock, flags);
			continue;
		}

		/* Writing 0 disables the channel's Tx DMA */
		iwl_write_direct32(priv,
				   IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
				    IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
				    (txq_id), 200);
		iwl_release_nic_access(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	/* Deallocate memory for all Tx queues */
	iwl4965_hw_txq_ctx_free(priv);
}
918
/**
 * iwl4965_hw_nic_reset - Soft-reset the NIC
 *
 * Stops bus mastering, asserts SW_RESET, waits for the MAC clock to become
 * ready, then re-enables the DMA and BSM clocks and disables L1-active in
 * the APMG power-management block.  Also clears STATUS_HCMD_ACTIVE and wakes
 * any waiter on the command queue, since the reset aborts whatever host
 * command was in flight.
 *
 * Returns 0 on success, or the error from iwl_grab_nic_access().
 */
int iwl4965_hw_nic_reset(struct iwl_priv *priv)
{
	int rc = 0;
	unsigned long flags;

	iwl4965_hw_nic_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	/* NOTE(review): this polls CSR_RESET but uses CSR_GP_CNTRL clock-ready
	 * flags, and the result is overwritten by the grab below without being
	 * checked -- a poll timeout here is silently ignored.  Looks like the
	 * intended target register is CSR_GP_CNTRL; confirm against later
	 * upstream revisions before changing. */
	rc = iwl_poll_bit(priv, CSR_RESET,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);

	udelay(10);

	rc = iwl_grab_nic_access(priv);
	if (!rc) {
		/* Re-request the clocks the reset turned off */
		iwl_write_prph(priv, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT |
			       APMG_CLK_VAL_BSM_CLK_RQT);

		udelay(10);

		/* Keep PCIe L1-active disabled (power-state workaround) */
		iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		iwl_release_nic_access(priv);
	}

	/* Any in-flight host command was lost in the reset */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

	spin_unlock_irqrestore(&priv->lock, flags);

	return rc;

}
961
962#define REG_RECALIB_PERIOD (60)
963
964/**
965 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
966 *
967 * This callback is provided in order to queue the statistics_work
968 * in work_queue context (v. softirq)
969 *
970 * This timer function is continually reset to execute within
971 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
972 * was received. We need to ensure we receive the statistics in order
973 * to update the temperature used for calibrating the TXPOWER. However,
974 * we can't send the statistics command from softirq context (which
975 * is the context which timers run at) so we have to queue off the
976 * statistics_work to actually send the command to the hardware.
977 */
978static void iwl4965_bg_statistics_periodic(unsigned long data)
979{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700980 struct iwl_priv *priv = (struct iwl_priv *)data;
Zhu Yib481de92007-09-25 17:54:57 -0700981
982 queue_work(priv->workqueue, &priv->statistics_work);
983}
984
985/**
986 * iwl4965_bg_statistics_work - Send the statistics request to the hardware.
987 *
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800988 * This is queued by iwl4965_bg_statistics_periodic.
Zhu Yib481de92007-09-25 17:54:57 -0700989 */
990static void iwl4965_bg_statistics_work(struct work_struct *work)
991{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700992 struct iwl_priv *priv = container_of(work, struct iwl_priv,
Zhu Yib481de92007-09-25 17:54:57 -0700993 statistics_work);
994
995 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
996 return;
997
998 mutex_lock(&priv->mutex);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800999 iwl4965_send_statistics_request(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001000 mutex_unlock(&priv->mutex);
1001}
1002
1003#define CT_LIMIT_CONST 259
1004#define TM_CT_KILL_THRESHOLD 110
1005
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001006void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001007{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001008 struct iwl4965_ct_kill_config cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001009 u32 R1, R2, R3;
1010 u32 temp_th;
1011 u32 crit_temperature;
1012 unsigned long flags;
Tomas Winkler857485c2008-03-21 13:53:44 -07001013 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07001014
1015 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001016 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
Zhu Yib481de92007-09-25 17:54:57 -07001017 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1018 spin_unlock_irqrestore(&priv->lock, flags);
1019
1020 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
1021 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1022 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1023 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1024 } else {
1025 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1026 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1027 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1028 }
1029
1030 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
1031
1032 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
1033 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
Tomas Winkler857485c2008-03-21 13:53:44 -07001034 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1035 sizeof(cmd), &cmd);
1036 if (ret)
Zhu Yib481de92007-09-25 17:54:57 -07001037 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
1038 else
1039 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
1040}
1041
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08001042#ifdef CONFIG_IWL4965_SENSITIVITY
Zhu Yib481de92007-09-25 17:54:57 -07001043
1044/* "false alarms" are signals that our DSP tries to lock onto,
1045 * but then determines that they are either noise, or transmissions
1046 * from a distant wireless network (also "noise", really) that get
1047 * "stepped on" by stronger transmissions within our own network.
1048 * This algorithm attempts to set a sensitivity level that is high
1049 * enough to receive all of our own network traffic, but not so
1050 * high that our DSP gets too busy trying to lock onto non-network
1051 * activity/noise. */
/**
 * iwl4965_sens_energy_cck - Adjust CCK sensitivity from false-alarm stats
 * @priv:           driver private data (sensitivity state lives in
 *                  priv->sensitivity_data)
 * @norm_fa:        normalized false-alarm count for this statistics period
 * @rx_enable_time: actual Rx-enabled time the count was measured over
 * @rx_info:        beacon-period signal/silence measurements per receiver
 *
 * Three-way decision: too many false alarms -> desensitize (raise energy
 * threshold / auto-correlation values); too few -> sensitize, but only when
 * background-noise history says it is safe; in range -> keep status quo.
 * Note: in the nrg scale a LOWER value means HIGHER energy, which is why
 * some clamps below use min()/max() in the opposite sense of their names.
 * Always returns 0.
 */
static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
				   u32 norm_fa,
				   u32 rx_enable_time,
				   struct statistics_general_data *rx_info)
{
	u32 max_nrg_cck = 0;
	int i = 0;
	u8 max_silence_rssi = 0;
	u32 silence_ref = 0;
	u8 silence_rssi_a = 0;
	u8 silence_rssi_b = 0;
	u8 silence_rssi_c = 0;
	u32 val;

	/* "false_alarms" values below are cross-multiplications to assess the
	 * numbers of false alarms within the measured period of actual Rx
	 * (Rx is off when we're txing), vs the min/max expected false alarms
	 * (some should be expected if rx is sensitive enough) in a
	 * hypothetical listening period of 200 time units (TU), 204.8 msec:
	 *
	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
	 *
	 * */
	u32 false_alarms = norm_fa * 200 * 1024;
	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
	struct iwl4965_sensitivity_data *data = NULL;

	data = &(priv->sensitivity_data);

	data->nrg_auto_corr_silence_diff = 0;

	/* Find max silence rssi among all 3 receivers.
	 * This is background noise, which may include transmissions from other
	 * networks, measured during silence before our network's beacon */
	silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
			       ALL_BAND_FILTER) >> 8);
	silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
			       ALL_BAND_FILTER) >> 8);
	silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
			       ALL_BAND_FILTER) >> 8);

	val = max(silence_rssi_b, silence_rssi_c);
	max_silence_rssi = max(silence_rssi_a, (u8) val);

	/* Store silence rssi in 20-beacon history table */
	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
	data->nrg_silence_idx++;
	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
		data->nrg_silence_idx = 0;

	/* Find max silence rssi across 20 beacon history */
	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
		val = data->nrg_silence_rssi[i];
		silence_ref = max(silence_ref, val);
	}
	IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
			silence_rssi_a, silence_rssi_b, silence_rssi_c,
			silence_ref);

	/* Find max rx energy (min value!) among all 3 receivers,
	 * measured during beacon frame.
	 * Save it in 10-beacon history table. */
	i = data->nrg_energy_idx;
	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);

	data->nrg_energy_idx++;
	if (data->nrg_energy_idx >= 10)
		data->nrg_energy_idx = 0;

	/* Find min rx energy (max value) across 10 beacon history.
	 * This is the minimum signal level that we want to receive well.
	 * Add backoff (margin so we don't miss slightly lower energy frames).
	 * This establishes an upper bound (min value) for energy threshold. */
	max_nrg_cck = data->nrg_value[0];
	for (i = 1; i < 10; i++)
		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
	max_nrg_cck += 6;

	IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
			rx_info->beacon_energy_a, rx_info->beacon_energy_b,
			rx_info->beacon_energy_c, max_nrg_cck - 6);

	/* Count number of consecutive beacons with fewer-than-desired
	 * false alarms. */
	if (false_alarms < min_false_alarms)
		data->num_in_cck_no_fa++;
	else
		data->num_in_cck_no_fa = 0;
	IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
			data->num_in_cck_no_fa);

	/* If we got too many false alarms this time, reduce sensitivity */
	if (false_alarms > max_false_alarms) {
		IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
				false_alarms, max_false_alarms);
		IWL_DEBUG_CALIB("... reducing sensitivity\n");
		data->nrg_curr_state = IWL_FA_TOO_MANY;

		if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
			/* Store for "fewer than desired" on later beacon */
			data->nrg_silence_ref = silence_ref;

			/* increase energy threshold (reduce nrg value)
			 * to decrease sensitivity */
			if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
				data->nrg_th_cck = data->nrg_th_cck
							 - NRG_STEP_CCK;
		}

		/* increase auto_corr values to decrease sensitivity */
		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
		else {
			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
			data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
		}
		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);

	/* Else if we got fewer than desired, increase sensitivity */
	} else if (false_alarms < min_false_alarms) {
		data->nrg_curr_state = IWL_FA_TOO_FEW;

		/* Compare silence level with silence level for most recent
		 * healthy number or too many false alarms */
		data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
						   (s32)silence_ref;

		IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
				false_alarms, min_false_alarms,
				data->nrg_auto_corr_silence_diff);

		/* Increase value to increase sensitivity, but only if:
		 * 1a) previous beacon did *not* have *too many* false alarms
		 * 1b) AND there's a significant difference in Rx levels
		 *      from a previous beacon with too many, or healthy # FAs
		 * OR 2) We've seen a lot of beacons (100) with too few
		 *       false alarms */
		if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
		    ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
		     (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

			IWL_DEBUG_CALIB("... increasing sensitivity\n");
			/* Increase nrg value to increase sensitivity */
			val = data->nrg_th_cck + NRG_STEP_CCK;
			data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);

			/* Decrease auto_corr values to increase sensitivity */
			val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
			data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);

			val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
			data->auto_corr_cck_mrc =
					 max((u32)AUTO_CORR_MIN_CCK_MRC, val);

		} else
			IWL_DEBUG_CALIB("... but not changing sensitivity\n");

	/* Else we got a healthy number of false alarms, keep status quo */
	} else {
		IWL_DEBUG_CALIB(" FA in safe zone\n");
		data->nrg_curr_state = IWL_FA_GOOD_RANGE;

		/* Store for use in "fewer than desired" with later beacon */
		data->nrg_silence_ref = silence_ref;

		/* If previous beacon had too many false alarms,
		 * give it some extra margin by reducing sensitivity again
		 * (but don't go below measured energy of desired Rx) */
		if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
			IWL_DEBUG_CALIB("... increasing margin\n");
			data->nrg_th_cck -= NRG_MARGIN;
		}
	}

	/* Make sure the energy threshold does not go above the measured
	 * energy of the desired Rx signals (reduced by backoff margin),
	 * or else we might start missing Rx frames.
	 * Lower value is higher energy, so we use max()!
	 */
	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
	IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);

	data->nrg_prev_state = data->nrg_curr_state;

	return 0;
}
1241
1242
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001243static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07001244 u32 norm_fa,
1245 u32 rx_enable_time)
1246{
1247 u32 val;
1248 u32 false_alarms = norm_fa * 200 * 1024;
1249 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
1250 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001251 struct iwl4965_sensitivity_data *data = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07001252
1253 data = &(priv->sensitivity_data);
1254
1255 /* If we got too many false alarms this time, reduce sensitivity */
1256 if (false_alarms > max_false_alarms) {
1257
1258 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
1259 false_alarms, max_false_alarms);
1260
1261 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
1262 data->auto_corr_ofdm =
1263 min((u32)AUTO_CORR_MAX_OFDM, val);
1264
1265 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
1266 data->auto_corr_ofdm_mrc =
1267 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
1268
1269 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
1270 data->auto_corr_ofdm_x1 =
1271 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
1272
1273 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
1274 data->auto_corr_ofdm_mrc_x1 =
1275 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1276 }
1277
1278 /* Else if we got fewer than desired, increase sensitivity */
1279 else if (false_alarms < min_false_alarms) {
1280
1281 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1282 false_alarms, min_false_alarms);
1283
1284 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1285 data->auto_corr_ofdm =
1286 max((u32)AUTO_CORR_MIN_OFDM, val);
1287
1288 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1289 data->auto_corr_ofdm_mrc =
1290 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1291
1292 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1293 data->auto_corr_ofdm_x1 =
1294 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1295
1296 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1297 data->auto_corr_ofdm_mrc_x1 =
1298 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1299 }
1300
1301 else
1302 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1303 min_false_alarms, false_alarms, max_false_alarms);
1304
1305 return 0;
1306}
1307
/* Async-command completion hook for SENSITIVITY_CMD. */
static int iwl4965_sensitivity_callback(struct iwl_priv *priv,
			struct iwl_cmd *cmd, struct sk_buff *skb)
{
	/* Nonzero return tells the caller we did not keep the response
	 * SKB, so it must free it. */
	return 1;
}
1314
1315/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001316static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags)
Zhu Yib481de92007-09-25 17:54:57 -07001317{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001318 struct iwl4965_sensitivity_cmd cmd ;
1319 struct iwl4965_sensitivity_data *data = NULL;
Tomas Winkler857485c2008-03-21 13:53:44 -07001320 struct iwl_host_cmd cmd_out = {
Zhu Yib481de92007-09-25 17:54:57 -07001321 .id = SENSITIVITY_CMD,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001322 .len = sizeof(struct iwl4965_sensitivity_cmd),
Zhu Yib481de92007-09-25 17:54:57 -07001323 .meta.flags = flags,
1324 .data = &cmd,
1325 };
Tomas Winkler857485c2008-03-21 13:53:44 -07001326 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001327
1328 data = &(priv->sensitivity_data);
1329
1330 memset(&cmd, 0, sizeof(cmd));
1331
1332 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1333 cpu_to_le16((u16)data->auto_corr_ofdm);
1334 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1335 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1336 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1337 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1338 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1339 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1340
1341 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1342 cpu_to_le16((u16)data->auto_corr_cck);
1343 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1344 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1345
1346 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1347 cpu_to_le16((u16)data->nrg_th_cck);
1348 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1349 cpu_to_le16((u16)data->nrg_th_ofdm);
1350
1351 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1352 __constant_cpu_to_le16(190);
1353 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1354 __constant_cpu_to_le16(390);
1355 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1356 __constant_cpu_to_le16(62);
1357
1358 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1359 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1360 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1361 data->nrg_th_ofdm);
1362
1363 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1364 data->auto_corr_cck, data->auto_corr_cck_mrc,
1365 data->nrg_th_cck);
1366
Ben Cahillf7d09d72007-11-29 11:09:51 +08001367 /* Update uCode's "work" table, and copy it to DSP */
Zhu Yib481de92007-09-25 17:54:57 -07001368 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1369
1370 if (flags & CMD_ASYNC)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001371 cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
Zhu Yib481de92007-09-25 17:54:57 -07001372
1373 /* Don't send command to uCode if nothing has changed */
1374 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1375 sizeof(u16)*HD_TABLE_SIZE)) {
1376 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1377 return 0;
1378 }
1379
1380 /* Copy table for comparison next time */
1381 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1382 sizeof(u16)*HD_TABLE_SIZE);
1383
Tomas Winkler857485c2008-03-21 13:53:44 -07001384 ret = iwl_send_cmd(priv, &cmd_out);
1385 if (ret)
1386 IWL_ERROR("SENSITIVITY_CMD failed\n");
Zhu Yib481de92007-09-25 17:54:57 -07001387
Tomas Winkler857485c2008-03-21 13:53:44 -07001388 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001389}
1390
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001391void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force)
Zhu Yib481de92007-09-25 17:54:57 -07001392{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001393 struct iwl4965_sensitivity_data *data = NULL;
Tomas Winkler857485c2008-03-21 13:53:44 -07001394 int i;
1395 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07001396
1397 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1398
1399 if (force)
1400 memset(&(priv->sensitivity_tbl[0]), 0,
1401 sizeof(u16)*HD_TABLE_SIZE);
1402
1403 /* Clear driver's sensitivity algo data */
1404 data = &(priv->sensitivity_data);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001405 memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
Zhu Yib481de92007-09-25 17:54:57 -07001406
1407 data->num_in_cck_no_fa = 0;
1408 data->nrg_curr_state = IWL_FA_TOO_MANY;
1409 data->nrg_prev_state = IWL_FA_TOO_MANY;
1410 data->nrg_silence_ref = 0;
1411 data->nrg_silence_idx = 0;
1412 data->nrg_energy_idx = 0;
1413
1414 for (i = 0; i < 10; i++)
1415 data->nrg_value[i] = 0;
1416
1417 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
1418 data->nrg_silence_rssi[i] = 0;
1419
1420 data->auto_corr_ofdm = 90;
1421 data->auto_corr_ofdm_mrc = 170;
1422 data->auto_corr_ofdm_x1 = 105;
1423 data->auto_corr_ofdm_mrc_x1 = 220;
1424 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1425 data->auto_corr_cck_mrc = 200;
1426 data->nrg_th_cck = 100;
1427 data->nrg_th_ofdm = 100;
1428
1429 data->last_bad_plcp_cnt_ofdm = 0;
1430 data->last_fa_cnt_ofdm = 0;
1431 data->last_bad_plcp_cnt_cck = 0;
1432 data->last_fa_cnt_cck = 0;
1433
1434 /* Clear prior Sensitivity command data to force send to uCode */
1435 if (force)
1436 memset(&(priv->sensitivity_tbl[0]), 0,
1437 sizeof(u16)*HD_TABLE_SIZE);
1438
Tomas Winkler857485c2008-03-21 13:53:44 -07001439 ret |= iwl4965_sensitivity_write(priv, flags);
1440 IWL_DEBUG_CALIB("<<return 0x%X\n", ret);
Zhu Yib481de92007-09-25 17:54:57 -07001441
1442 return;
1443}
1444
1445
1446/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1447 * Called after every association, but this runs only once!
1448 * ... once chain noise is calibrated the first time, it's good forever. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001449void iwl4965_chain_noise_reset(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001450{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001451 struct iwl4965_chain_noise_data *data = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07001452
1453 data = &(priv->chain_noise_data);
Tomas Winkler3109ece2008-03-28 16:33:35 -07001454 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001455 struct iwl4965_calibration_cmd cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001456
1457 memset(&cmd, 0, sizeof(cmd));
1458 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1459 cmd.diff_gain_a = 0;
1460 cmd.diff_gain_b = 0;
1461 cmd.diff_gain_c = 0;
Tomas Winklere5472972008-03-28 16:21:12 -07001462 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
1463 sizeof(cmd), &cmd, NULL);
Zhu Yib481de92007-09-25 17:54:57 -07001464 msleep(4);
1465 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1466 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1467 }
1468 return;
1469}
1470
1471/*
1472 * Accumulate 20 beacons of signal and noise statistics for each of
1473 * 3 receivers/antennas/rx-chains, then figure out:
1474 * 1) Which antennas are connected.
1475 * 2) Differential rx gain settings to balance the 3 receivers.
1476 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001477static void iwl4965_noise_calibration(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001478 struct iwl4965_notif_statistics *stat_resp)
Zhu Yib481de92007-09-25 17:54:57 -07001479{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001480 struct iwl4965_chain_noise_data *data = NULL;
Tomas Winkler857485c2008-03-21 13:53:44 -07001481 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07001482
1483 u32 chain_noise_a;
1484 u32 chain_noise_b;
1485 u32 chain_noise_c;
1486 u32 chain_sig_a;
1487 u32 chain_sig_b;
1488 u32 chain_sig_c;
1489 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1490 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1491 u32 max_average_sig;
1492 u16 max_average_sig_antenna_i;
1493 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1494 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1495 u16 i = 0;
1496 u16 chan_num = INITIALIZATION_VALUE;
1497 u32 band = INITIALIZATION_VALUE;
1498 u32 active_chains = 0;
1499 unsigned long flags;
1500 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1501
1502 data = &(priv->chain_noise_data);
1503
1504 /* Accumulate just the first 20 beacons after the first association,
1505 * then we're done forever. */
1506 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1507 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1508 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1509 return;
1510 }
1511
1512 spin_lock_irqsave(&priv->lock, flags);
1513 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1514 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1515 spin_unlock_irqrestore(&priv->lock, flags);
1516 return;
1517 }
1518
1519 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1520 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1521
1522 /* Make sure we accumulate data for just the associated channel
1523 * (even if scanning). */
1524 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1525 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1526 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1527 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1528 chan_num, band);
1529 spin_unlock_irqrestore(&priv->lock, flags);
1530 return;
1531 }
1532
1533 /* Accumulate beacon statistics values across 20 beacons */
1534 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1535 IN_BAND_FILTER;
1536 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1537 IN_BAND_FILTER;
1538 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1539 IN_BAND_FILTER;
1540
1541 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1542 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1543 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1544
1545 spin_unlock_irqrestore(&priv->lock, flags);
1546
1547 data->beacon_count++;
1548
1549 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1550 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1551 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1552
1553 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1554 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1555 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1556
1557 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1558 data->beacon_count);
1559 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1560 chain_sig_a, chain_sig_b, chain_sig_c);
1561 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1562 chain_noise_a, chain_noise_b, chain_noise_c);
1563
1564 /* If this is the 20th beacon, determine:
1565 * 1) Disconnected antennas (using signal strengths)
1566 * 2) Differential gain (using silence noise) to balance receivers */
1567 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1568
1569 /* Analyze signal for disconnected antenna */
1570 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1571 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1572 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1573
1574 if (average_sig[0] >= average_sig[1]) {
1575 max_average_sig = average_sig[0];
1576 max_average_sig_antenna_i = 0;
1577 active_chains = (1 << max_average_sig_antenna_i);
1578 } else {
1579 max_average_sig = average_sig[1];
1580 max_average_sig_antenna_i = 1;
1581 active_chains = (1 << max_average_sig_antenna_i);
1582 }
1583
1584 if (average_sig[2] >= max_average_sig) {
1585 max_average_sig = average_sig[2];
1586 max_average_sig_antenna_i = 2;
1587 active_chains = (1 << max_average_sig_antenna_i);
1588 }
1589
1590 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1591 average_sig[0], average_sig[1], average_sig[2]);
1592 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1593 max_average_sig, max_average_sig_antenna_i);
1594
1595 /* Compare signal strengths for all 3 receivers. */
1596 for (i = 0; i < NUM_RX_CHAINS; i++) {
1597 if (i != max_average_sig_antenna_i) {
1598 s32 rssi_delta = (max_average_sig -
1599 average_sig[i]);
1600
1601 /* If signal is very weak, compared with
1602 * strongest, mark it as disconnected. */
1603 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1604 data->disconn_array[i] = 1;
1605 else
1606 active_chains |= (1 << i);
1607 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1608 "disconn_array[i] = %d\n",
1609 i, rssi_delta, data->disconn_array[i]);
1610 }
1611 }
1612
1613 /*If both chains A & B are disconnected -
1614 * connect B and leave A as is */
1615 if (data->disconn_array[CHAIN_A] &&
1616 data->disconn_array[CHAIN_B]) {
1617 data->disconn_array[CHAIN_B] = 0;
1618 active_chains |= (1 << CHAIN_B);
1619 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1620 "W/A - declare B as connected\n");
1621 }
1622
1623 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
1624 active_chains);
1625
1626 /* Save for use within RXON, TX, SCAN commands, etc. */
1627 priv->valid_antenna = active_chains;
1628
1629 /* Analyze noise for rx balance */
1630 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
1631 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1632 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1633
1634 for (i = 0; i < NUM_RX_CHAINS; i++) {
1635 if (!(data->disconn_array[i]) &&
1636 (average_noise[i] <= min_average_noise)) {
1637 /* This means that chain i is active and has
1638 * lower noise values so far: */
1639 min_average_noise = average_noise[i];
1640 min_average_noise_antenna_i = i;
1641 }
1642 }
1643
1644 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1645
1646 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1647 average_noise[0], average_noise[1],
1648 average_noise[2]);
1649
1650 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1651 min_average_noise, min_average_noise_antenna_i);
1652
1653 for (i = 0; i < NUM_RX_CHAINS; i++) {
1654 s32 delta_g = 0;
1655
1656 if (!(data->disconn_array[i]) &&
1657 (data->delta_gain_code[i] ==
1658 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1659 delta_g = average_noise[i] - min_average_noise;
1660 data->delta_gain_code[i] = (u8)((delta_g *
1661 10) / 15);
1662 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE <
1663 data->delta_gain_code[i])
1664 data->delta_gain_code[i] =
1665 CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
1666
1667 data->delta_gain_code[i] =
1668 (data->delta_gain_code[i] | (1 << 2));
1669 } else
1670 data->delta_gain_code[i] = 0;
1671 }
1672 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1673 data->delta_gain_code[0],
1674 data->delta_gain_code[1],
1675 data->delta_gain_code[2]);
1676
1677 /* Differential gain gets sent to uCode only once */
1678 if (!data->radio_write) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001679 struct iwl4965_calibration_cmd cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001680 data->radio_write = 1;
1681
1682 memset(&cmd, 0, sizeof(cmd));
1683 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1684 cmd.diff_gain_a = data->delta_gain_code[0];
1685 cmd.diff_gain_b = data->delta_gain_code[1];
1686 cmd.diff_gain_c = data->delta_gain_code[2];
Tomas Winkler857485c2008-03-21 13:53:44 -07001687 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
Zhu Yib481de92007-09-25 17:54:57 -07001688 sizeof(cmd), &cmd);
Tomas Winkler857485c2008-03-21 13:53:44 -07001689 if (ret)
Zhu Yib481de92007-09-25 17:54:57 -07001690 IWL_DEBUG_CALIB("fail sending cmd "
1691 "REPLY_PHY_CALIBRATION_CMD \n");
1692
1693 /* TODO we might want recalculate
1694 * rx_chain in rxon cmd */
1695
1696 /* Mark so we run this algo only once! */
1697 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1698 }
1699 data->chain_noise_a = 0;
1700 data->chain_noise_b = 0;
1701 data->chain_noise_c = 0;
1702 data->chain_signal_a = 0;
1703 data->chain_signal_b = 0;
1704 data->chain_signal_c = 0;
1705 data->beacon_count = 0;
1706 }
1707 return;
1708}
1709
/**
 * iwl4965_sensitivity_calibration - tune Rx sensitivity from uCode statistics
 * @priv: driver private data
 * @resp: statistics notification received from the uCode
 *
 * Extracts false-alarm and bad-PLCP counters plus per-chain beacon
 * silence/energy values from the notification (under priv->lock, since the
 * statistics buffer is shared with the interrupt path), converts the
 * monotonically increasing hardware counters into per-period deltas, and
 * feeds the normalized totals to the OFDM auto-correlation and CCK energy
 * tuning helpers.  Finally queues an asynchronous sensitivity command.
 *
 * Does nothing unless associated and the notification carries valid
 * interference data.
 */
static void iwl4965_sensitivity_calibration(struct iwl_priv *priv,
					    struct iwl4965_notif_statistics *resp)
{
	u32 rx_enable_time;
	u32 fa_cck;
	u32 fa_ofdm;
	u32 bad_plcp_cck;
	u32 bad_plcp_ofdm;
	u32 norm_fa_ofdm;
	u32 norm_fa_cck;
	struct iwl4965_sensitivity_data *data = NULL;
	struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
	struct statistics_rx *statistics = &(resp->rx);
	unsigned long flags;
	struct statistics_general_data statis;
	int ret;

	data = &(priv->sensitivity_data);

	if (!iwl_is_associated(priv)) {
		IWL_DEBUG_CALIB("<< - not associated\n");
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);
	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB("<< invalid data.\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	/* Extract Statistics: */
	rx_enable_time = le32_to_cpu(rx_info->channel_load);
	fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
	fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
	bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
	bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);

	/* Snapshot beacon silence/energy per Rx chain for the CCK
	 * energy-threshold calculation below. */
	statis.beacon_silence_rssi_a =
		le32_to_cpu(statistics->general.beacon_silence_rssi_a);
	statis.beacon_silence_rssi_b =
		le32_to_cpu(statistics->general.beacon_silence_rssi_b);
	statis.beacon_silence_rssi_c =
		le32_to_cpu(statistics->general.beacon_silence_rssi_c);
	statis.beacon_energy_a =
		le32_to_cpu(statistics->general.beacon_energy_a);
	statis.beacon_energy_b =
		le32_to_cpu(statistics->general.beacon_energy_b);
	statis.beacon_energy_c =
		le32_to_cpu(statistics->general.beacon_energy_c);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);

	/* Without a measurement period the rates below are meaningless */
	if (!rx_enable_time) {
		IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
		return;
	}

	/* These statistics increase monotonically, and do not reset
	 * at each beacon. Calculate difference from last value, or just
	 * use the new statistics value if it has reset or wrapped around. */
	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
	else {
		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
	}

	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
	else {
		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
	}

	if (data->last_fa_cnt_ofdm > fa_ofdm)
		data->last_fa_cnt_ofdm = fa_ofdm;
	else {
		fa_ofdm -= data->last_fa_cnt_ofdm;
		data->last_fa_cnt_ofdm += fa_ofdm;
	}

	if (data->last_fa_cnt_cck > fa_cck)
		data->last_fa_cnt_cck = fa_cck;
	else {
		fa_cck -= data->last_fa_cnt_cck;
		data->last_fa_cnt_cck += fa_cck;
	}

	/* Total aborted signal locks */
	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
	norm_fa_cck = fa_cck + bad_plcp_cck;

	IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
			bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);

	iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
	iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
	/* NOTE(review): ret is assigned but never checked; a failed
	 * sensitivity write is silently ignored here. */
	ret = iwl4965_sensitivity_write(priv, CMD_ASYNC);

	return;
}
1814
1815static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1816{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001817 struct iwl_priv *priv = container_of(work, struct iwl_priv,
Zhu Yib481de92007-09-25 17:54:57 -07001818 sensitivity_work);
1819
1820 mutex_lock(&priv->mutex);
1821
1822 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1823 test_bit(STATUS_SCANNING, &priv->status)) {
1824 mutex_unlock(&priv->mutex);
1825 return;
1826 }
1827
1828 if (priv->start_calib) {
1829 iwl4965_noise_calibration(priv, &priv->statistics);
1830
1831 if (priv->sensitivity_data.state ==
1832 IWL_SENS_CALIB_NEED_REINIT) {
1833 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1834 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1835 } else
1836 iwl4965_sensitivity_calibration(priv,
1837 &priv->statistics);
1838 }
1839
1840 mutex_unlock(&priv->mutex);
1841 return;
1842}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08001843#endif /*CONFIG_IWL4965_SENSITIVITY*/
Zhu Yib481de92007-09-25 17:54:57 -07001844
1845static void iwl4965_bg_txpower_work(struct work_struct *work)
1846{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001847 struct iwl_priv *priv = container_of(work, struct iwl_priv,
Zhu Yib481de92007-09-25 17:54:57 -07001848 txpower_work);
1849
1850 /* If a scan happened to start before we got here
1851 * then just return; the statistics notification will
1852 * kick off another scheduled work to compensate for
1853 * any temperature delta we missed here. */
1854 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1855 test_bit(STATUS_SCANNING, &priv->status))
1856 return;
1857
1858 mutex_lock(&priv->mutex);
1859
1860 /* Regardless of if we are assocaited, we must reconfigure the
1861 * TX power since frames can be sent on non-radar channels while
1862 * not associated */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001863 iwl4965_hw_reg_send_txpower(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001864
1865 /* Update last_temperature to keep is_calib_needed from running
1866 * when it isn't needed... */
1867 priv->last_temperature = priv->temperature;
1868
1869 mutex_unlock(&priv->mutex);
1870}
1871
1872/*
1873 * Acquire priv->lock before calling this function !
1874 */
static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	/* Write pointer for queue txq_id: index in the low byte,
	 * queue id in bits 8+. */
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			     (index & 0xff) | (txq_id << 8));
	/* Keep the scheduler's read pointer for this queue in step */
	iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}
1881
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001882/**
1883 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
1884 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
1885 * @scd_retry: (1) Indicates queue will be used in aggregation mode
1886 *
1887 * NOTE: Acquire priv->lock before calling this function !
Zhu Yib481de92007-09-25 17:54:57 -07001888 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001889static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001890 struct iwl4965_tx_queue *txq,
Zhu Yib481de92007-09-25 17:54:57 -07001891 int tx_fifo_id, int scd_retry)
1892{
1893 int txq_id = txq->q.id;
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001894
1895 /* Find out whether to activate Tx queue */
Zhu Yib481de92007-09-25 17:54:57 -07001896 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1897
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001898 /* Set up and activate */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001899 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Zhu Yib481de92007-09-25 17:54:57 -07001900 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1901 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
1902 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
1903 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1904 SCD_QUEUE_STTS_REG_MSK);
1905
1906 txq->sched_retry = scd_retry;
1907
1908 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001909 active ? "Activate" : "Deactivate",
Zhu Yib481de92007-09-25 17:54:57 -07001910 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1911}
1912
/* Default queue-number -> DMA/FIFO channel mapping applied at "alive"
 * time; index 4 (IWL_CMD_FIFO_NUM) is the host command queue's FIFO. */
static const u16 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_CMD_FIFO_NUM,
	IWL_TX_FIFO_HCCA_1,
	IWL_TX_FIFO_HCCA_2
};
1922
/* Mark Tx queue txq_id active in the driver's queue-context bitmap */
static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
{
	set_bit(txq_id, &priv->txq_ctx_active_msk);
}
1927
/* Clear Tx queue txq_id from the driver's queue-context bitmap */
static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
{
	clear_bit(txq_id, &priv->txq_ctx_active_msk);
}
1932
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001933int iwl4965_alive_notify(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001934{
1935 u32 a;
1936 int i = 0;
1937 unsigned long flags;
Tomas Winkler857485c2008-03-21 13:53:44 -07001938 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001939
1940 spin_lock_irqsave(&priv->lock, flags);
1941
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08001942#ifdef CONFIG_IWL4965_SENSITIVITY
Zhu Yib481de92007-09-25 17:54:57 -07001943 memset(&(priv->sensitivity_data), 0,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001944 sizeof(struct iwl4965_sensitivity_data));
Zhu Yib481de92007-09-25 17:54:57 -07001945 memset(&(priv->chain_noise_data), 0,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001946 sizeof(struct iwl4965_chain_noise_data));
Zhu Yib481de92007-09-25 17:54:57 -07001947 for (i = 0; i < NUM_RX_CHAINS; i++)
1948 priv->chain_noise_data.delta_gain_code[i] =
1949 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08001950#endif /* CONFIG_IWL4965_SENSITIVITY*/
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001951 ret = iwl_grab_nic_access(priv);
Tomas Winkler857485c2008-03-21 13:53:44 -07001952 if (ret) {
Zhu Yib481de92007-09-25 17:54:57 -07001953 spin_unlock_irqrestore(&priv->lock, flags);
Tomas Winkler857485c2008-03-21 13:53:44 -07001954 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001955 }
1956
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001957 /* Clear 4965's internal Tx Scheduler data base */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001958 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
Zhu Yib481de92007-09-25 17:54:57 -07001959 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1960 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001961 iwl_write_targ_mem(priv, a, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001962 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001963 iwl_write_targ_mem(priv, a, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001964 for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001965 iwl_write_targ_mem(priv, a, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001966
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001967 /* Tel 4965 where to find Tx byte count tables */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001968 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
Zhu Yib481de92007-09-25 17:54:57 -07001969 (priv->hw_setting.shared_phys +
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001970 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001971
1972 /* Disable chain mode for all queues */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001973 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001974
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001975 /* Initialize each Tx queue (including the command queue) */
Zhu Yib481de92007-09-25 17:54:57 -07001976 for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001977
1978 /* TFD circular buffer read/write indexes */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001979 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001980 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001981
1982 /* Max Tx Window size for Scheduler-ACK mode */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001983 iwl_write_targ_mem(priv, priv->scd_base_addr +
Zhu Yib481de92007-09-25 17:54:57 -07001984 SCD_CONTEXT_QUEUE_OFFSET(i),
1985 (SCD_WIN_SIZE <<
1986 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1987 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001988
1989 /* Frame limit */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001990 iwl_write_targ_mem(priv, priv->scd_base_addr +
Zhu Yib481de92007-09-25 17:54:57 -07001991 SCD_CONTEXT_QUEUE_OFFSET(i) +
1992 sizeof(u32),
1993 (SCD_FRAME_LIMIT <<
1994 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1995 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1996
1997 }
Tomas Winkler12a81f62008-04-03 16:05:20 -07001998 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
Zhu Yib481de92007-09-25 17:54:57 -07001999 (1 << priv->hw_setting.max_txq_num) - 1);
2000
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002001 /* Activate all Tx DMA/FIFO channels */
Tomas Winkler12a81f62008-04-03 16:05:20 -07002002 iwl_write_prph(priv, IWL49_SCD_TXFACT,
Zhu Yib481de92007-09-25 17:54:57 -07002003 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2004
2005 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002006
2007 /* Map each Tx/cmd queue to its corresponding fifo */
Zhu Yib481de92007-09-25 17:54:57 -07002008 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2009 int ac = default_queue_to_tx_fifo[i];
2010 iwl4965_txq_ctx_activate(priv, i);
2011 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2012 }
2013
Tomas Winkler3395f6e2008-03-25 16:33:37 -07002014 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002015 spin_unlock_irqrestore(&priv->lock, flags);
2016
Tomas Winkler857485c2008-03-21 13:53:44 -07002017 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07002018}
2019
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002020/**
2021 * iwl4965_hw_set_hw_setting
2022 *
2023 * Called when initializing driver
2024 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002025int iwl4965_hw_set_hw_setting(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002026{
Assaf Krauss316c30d2008-03-14 10:38:46 -07002027 int ret = 0;
2028
Assaf Krauss1ea87392008-03-18 14:57:50 -07002029 if ((priv->cfg->mod_params->num_of_queues > IWL_MAX_NUM_QUEUES) ||
2030 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
Assaf Krauss316c30d2008-03-14 10:38:46 -07002031 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
2032 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
2033 ret = -EINVAL;
2034 goto out;
2035 }
2036
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002037 /* Allocate area for Tx byte count tables and Rx queue status */
Zhu Yib481de92007-09-25 17:54:57 -07002038 priv->hw_setting.shared_virt =
2039 pci_alloc_consistent(priv->pci_dev,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002040 sizeof(struct iwl4965_shared),
Zhu Yib481de92007-09-25 17:54:57 -07002041 &priv->hw_setting.shared_phys);
2042
Assaf Krauss316c30d2008-03-14 10:38:46 -07002043 if (!priv->hw_setting.shared_virt) {
2044 ret = -ENOMEM;
2045 goto out;
2046 }
Zhu Yib481de92007-09-25 17:54:57 -07002047
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002048 memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl4965_shared));
Zhu Yib481de92007-09-25 17:54:57 -07002049
Assaf Krauss1ea87392008-03-18 14:57:50 -07002050 priv->hw_setting.max_txq_num = priv->cfg->mod_params->num_of_queues;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002051 priv->hw_setting.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
Zhu Yib481de92007-09-25 17:54:57 -07002052 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
2053 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
Assaf Krauss1ea87392008-03-18 14:57:50 -07002054 if (priv->cfg->mod_params->amsdu_size_8K)
Ron Rindjunsky9ee1ba42007-11-26 16:14:42 +02002055 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_8K;
2056 else
2057 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_4K;
2058 priv->hw_setting.max_pkt_size = priv->hw_setting.rx_buf_size - 256;
Zhu Yib481de92007-09-25 17:54:57 -07002059 priv->hw_setting.max_stations = IWL4965_STATION_COUNT;
2060 priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID;
Tomas Winkler3e82a822008-02-13 11:32:31 -08002061
2062 priv->hw_setting.tx_ant_num = 2;
2063
Assaf Krauss316c30d2008-03-14 10:38:46 -07002064out:
2065 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07002066}
2067
2068/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002069 * iwl4965_hw_txq_ctx_free - Free TXQ Context
Zhu Yib481de92007-09-25 17:54:57 -07002070 *
2071 * Destroy all TX DMA queues and structures
2072 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002073void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002074{
2075 int txq_id;
2076
2077 /* Tx queues */
2078 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002079 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
Zhu Yib481de92007-09-25 17:54:57 -07002080
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002081 /* Keep-warm buffer */
Zhu Yib481de92007-09-25 17:54:57 -07002082 iwl4965_kw_free(priv);
2083}
2084
2085/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002086 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
Zhu Yib481de92007-09-25 17:54:57 -07002087 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002088 * Does NOT advance any TFD circular buffer read/write indexes
2089 * Does NOT free the TFD itself (which is within circular buffer)
Zhu Yib481de92007-09-25 17:54:57 -07002090 */
int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
{
	/* TFD at the queue's current read index */
	struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
	struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int counter = 0;
	int index, is_odd;

	/* Host command buffers stay mapped in memory, nothing to clean */
	if (txq->q.id == IWL_CMD_QUEUE_NUM)
		return 0;

	/* Sanity check on number of chunks */
	counter = IWL_GET_BITS(*bd, num_tbs);
	if (counter > MAX_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, it is quite serious situation */
		return 0;
	}

	/* Unmap chunks, if any.
	 * TFD info for odd chunks is different format than for even chunks.
	 * Each pa[index] entry packs one even (tb1) and one odd (tb2)
	 * buffer; the odd buffer's address is split into lo16/hi20 bit
	 * fields.  Chunk 0 is skipped (the "else if (i > 0)" guard) —
	 * presumably it is mapped/unmapped elsewhere; confirm in the Tx
	 * path before changing. */
	for (i = 0; i < counter; i++) {
		index = i / 2;
		is_odd = i & 0x1;

		if (is_odd)
			pci_unmap_single(
				dev,
				IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
				(IWL_GET_BITS(bd->pa[index],
					      tb2_addr_hi20) << 16),
				IWL_GET_BITS(bd->pa[index], tb2_len),
				PCI_DMA_TODEVICE);

		else if (i > 0)
			pci_unmap_single(dev,
					 le32_to_cpu(bd->pa[index].tb1_addr),
					 IWL_GET_BITS(bd->pa[index], tb1_len),
					 PCI_DMA_TODEVICE);

		/* Free SKB, if any, for this chunk */
		if (txq->txb[txq->q.read_ptr].skb[i]) {
			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];

			dev_kfree_skb(skb);
			txq->txb[txq->q.read_ptr].skb[i] = NULL;
		}
	}
	return 0;
}
2143
/* Unimplemented stub: logs an error and always fails with -EINVAL. */
int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
{
	IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
	return -EINVAL;
}
2149
2150static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
2151{
2152 s32 sign = 1;
2153
2154 if (num < 0) {
2155 sign = -sign;
2156 num = -num;
2157 }
2158 if (denom < 0) {
2159 sign = -sign;
2160 denom = -denom;
2161 }
2162 *res = 1;
2163 *res = ((num * 2 + denom) / (denom * 2)) * sign;
2164
2165 return 1;
2166}
2167
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002168/**
2169 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
2170 *
2171 * Determines power supply voltage compensation for txpower calculations.
2172 * Returns number of 1/2-dB steps to subtract from gain table index,
2173 * to compensate for difference between power supply voltage during
2174 * factory measurements, vs. current power supply voltage.
2175 *
2176 * Voltage indication is higher for lower voltage.
2177 * Lower voltage requires more gain (lower gain table index).
2178 */
Zhu Yib481de92007-09-25 17:54:57 -07002179static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
2180 s32 current_voltage)
2181{
2182 s32 comp = 0;
2183
2184 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
2185 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
2186 return 0;
2187
2188 iwl4965_math_div_round(current_voltage - eeprom_voltage,
2189 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
2190
2191 if (current_voltage > eeprom_voltage)
2192 comp *= 2;
2193 if ((comp < -2) || (comp > 2))
2194 comp = 0;
2195
2196 return comp;
2197}
2198
Assaf Kraussbf85ea42008-03-14 10:38:49 -07002199static const struct iwl_channel_info *
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002200iwl4965_get_channel_txpower_info(struct iwl_priv *priv,
Johannes Berg8318d782008-01-24 19:38:38 +01002201 enum ieee80211_band band, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07002202{
Assaf Kraussbf85ea42008-03-14 10:38:49 -07002203 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07002204
Assaf Krauss8622e702008-03-21 13:53:43 -07002205 ch_info = iwl_get_channel_info(priv, band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07002206
2207 if (!is_channel_valid(ch_info))
2208 return NULL;
2209
2210 return ch_info;
2211}
2212
2213static s32 iwl4965_get_tx_atten_grp(u16 channel)
2214{
2215 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
2216 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
2217 return CALIB_CH_GROUP_5;
2218
2219 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
2220 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
2221 return CALIB_CH_GROUP_1;
2222
2223 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
2224 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
2225 return CALIB_CH_GROUP_2;
2226
2227 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
2228 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
2229 return CALIB_CH_GROUP_3;
2230
2231 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
2232 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
2233 return CALIB_CH_GROUP_4;
2234
2235 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
2236 return -1;
2237}
2238
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002239static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
Zhu Yib481de92007-09-25 17:54:57 -07002240{
2241 s32 b = -1;
2242
2243 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2244 if (priv->eeprom.calib_info.band_info[b].ch_from == 0)
2245 continue;
2246
2247 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from)
2248 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to))
2249 break;
2250 }
2251
2252 return b;
2253}
2254
2255static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2256{
2257 s32 val;
2258
2259 if (x2 == x1)
2260 return y1;
2261 else {
2262 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
2263 return val + y2;
2264 }
2265}
2266
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002267/**
2268 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
2269 *
2270 * Interpolates factory measurements from the two sample channels within a
2271 * sub-band, to apply to channel of interest. Interpolation is proportional to
2272 * differences in channel frequencies, which is proportional to differences
2273 * in channel number.
2274 */
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
				    struct iwl4965_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct iwl4965_eeprom_calib_measure *m1;
	const struct iwl4965_eeprom_calib_measure *m2;
	struct iwl4965_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	/* Locate the calibration sub-band containing this channel */
	s = iwl4965_get_sub_band(priv, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IWL_ERROR("Tx Power can not find channel %d ", channel);
		return -1;
	}

	/* The sub-band's two factory-measured sample channels */
	ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num;
	ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
			  channel, s, ch_i1, ch_i2);

	/* Interpolate each measurement (power, gain index, temperature,
	 * power-amp detector) for every Tx chain, proportionally to the
	 * channel-number distance from the two sample channels. */
	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(priv->eeprom.calib_info.band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(priv->eeprom.calib_info.band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->actual_pow,
							   ch_i2,
							   m2->actual_pow);
			omeas->gain_idx =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->gain_idx, ch_i2,
							   m2->gain_idx);
			omeas->temperature =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->temperature,
							   ch_i2,
							   m2->temperature);
			omeas->pa_det =
			    (s8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->pa_det, ch_i2,
							   m2->pa_det);

			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
			     m1->actual_pow, m2->actual_pow, omeas->actual_pow);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
			     m1->gain_idx, m2->gain_idx, omeas->gain_idx);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
			     m1->pa_det, m2->pa_det, omeas->pa_det);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
			     m1->temperature, m2->temperature,
			     omeas->temperature);
		}
	}

	return 0;
}
2345
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
 * Rows: 8 OFDM entries each for SISO/MIMO x 20/40 MHz, then one shared
 * CCK entry at index 32. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
2355
/* Thermal compensation values for txpower for various frequency ranges ...
 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;		/* numerator: degrees C per half-dB */
	s32 degrees_per_05db_a_denom;	/* denominator of the above ratio */
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch 34-43 */
	{4, 1},			/* group 1 5.2, ch 44-70 */
	{4, 1},			/* group 2 5.2, ch 71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch all */
};
2368
2369static s32 get_min_power_index(s32 rate_power_index, u32 band)
2370{
2371 if (!band) {
2372 if ((rate_power_index & 7) <= 4)
2373 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
2374 }
2375 return MIN_TX_GAIN_INDEX;
2376}
2377
/* One step of the txpower gain tables below.  The two values are written
 * into the per-chain Tx power command (dsp -> dsp_predis_atten,
 * radio -> radio_tx_gain) by iwl4965_fill_txpower_tbl(). */
struct gain_entry {
	u8 dsp;		/* DSP predistortion attenuation value */
	u8 radio;	/* radio gain control code */
};
2382
/* Gain settings lookup, indexed as gain_table[band][power_index]:
 * band 0 = 5.2 GHz, band 1 = 2.4 GHz.  Index 0 is the highest txpower;
 * higher indexes mean lower txpower (see iwl4965_fill_txpower_tbl,
 * which clamps indexes to 0..107). */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
		{123, 0x3F},	/* highest txpower */
		{117, 0x3F},
		{110, 0x3F},
		{104, 0x3F},
		{98, 0x3F},
		{110, 0x3E},
		{104, 0x3E},
		{98, 0x3E},
		{110, 0x3D},
		{104, 0x3D},
		{98, 0x3D},
		{110, 0x3C},
		{104, 0x3C},
		{98, 0x3C},
		{110, 0x3B},
		{104, 0x3B},
		{98, 0x3B},
		{110, 0x3A},
		{104, 0x3A},
		{98, 0x3A},
		{110, 0x39},
		{104, 0x39},
		{98, 0x39},
		{110, 0x38},
		{104, 0x38},
		{98, 0x38},
		{110, 0x37},
		{104, 0x37},
		{98, 0x37},
		{110, 0x36},
		{104, 0x36},
		{98, 0x36},
		{110, 0x35},
		{104, 0x35},
		{98, 0x35},
		{110, 0x34},
		{104, 0x34},
		{98, 0x34},
		{110, 0x33},
		{104, 0x33},
		{98, 0x33},
		{110, 0x32},
		{104, 0x32},
		{98, 0x32},
		{110, 0x31},
		{104, 0x31},
		{98, 0x31},
		{110, 0x30},
		{104, 0x30},
		{98, 0x30},
		/* NOTE(review): radio codes skip from 0x30 to 0x25, and
		 * irregular steps appear below (0x22->0x18, 0x12->0x08) --
		 * presumably hardware-defined gain codes; do not "fix". */
		{110, 0x25},
		{104, 0x25},
		{98, 0x25},
		{110, 0x24},
		{104, 0x24},
		{98, 0x24},
		{110, 0x23},
		{104, 0x23},
		{98, 0x23},
		{110, 0x22},
		{104, 0x18},
		{98, 0x18},
		{110, 0x17},
		{104, 0x17},
		{98, 0x17},
		{110, 0x16},
		{104, 0x16},
		{98, 0x16},
		{110, 0x15},
		{104, 0x15},
		{98, 0x15},
		{110, 0x14},
		{104, 0x14},
		{98, 0x14},
		{110, 0x13},
		{104, 0x13},
		{98, 0x13},
		{110, 0x12},
		{104, 0x08},
		{98, 0x08},
		{110, 0x07},
		{104, 0x07},
		{98, 0x07},
		{110, 0x06},
		{104, 0x06},
		{98, 0x06},
		{110, 0x05},
		{104, 0x05},
		{98, 0x05},
		{110, 0x04},
		{104, 0x04},
		{98, 0x04},
		{110, 0x03},
		{104, 0x03},
		{98, 0x03},
		{110, 0x02},
		{104, 0x02},
		{98, 0x02},
		{110, 0x01},
		{104, 0x01},
		{98, 0x01},
		{110, 0x00},
		{104, 0x00},
		{98, 0x00},
		/* radio gain exhausted; continue with DSP attenuation only */
		{93, 0x00},
		{88, 0x00},
		{83, 0x00},
		{78, 0x00},
	},
	/* 2.4GHz power gain index table */
	{
		{110, 0x3f},	/* highest txpower */
		{104, 0x3f},
		{98, 0x3f},
		{110, 0x3e},
		{104, 0x3e},
		{98, 0x3e},
		{110, 0x3d},
		{104, 0x3d},
		{98, 0x3d},
		{110, 0x3c},
		{104, 0x3c},
		{98, 0x3c},
		{110, 0x3b},
		{104, 0x3b},
		{98, 0x3b},
		{110, 0x3a},
		{104, 0x3a},
		{98, 0x3a},
		{110, 0x39},
		{104, 0x39},
		{98, 0x39},
		{110, 0x38},
		{104, 0x38},
		{98, 0x38},
		{110, 0x37},
		{104, 0x37},
		{98, 0x37},
		{110, 0x36},
		{104, 0x36},
		{98, 0x36},
		{110, 0x35},
		{104, 0x35},
		{98, 0x35},
		{110, 0x34},
		{104, 0x34},
		{98, 0x34},
		{110, 0x33},
		{104, 0x33},
		{98, 0x33},
		{110, 0x32},
		{104, 0x32},
		{98, 0x32},
		{110, 0x31},
		{104, 0x31},
		{98, 0x31},
		{110, 0x30},
		{104, 0x30},
		{98, 0x30},
		/* NOTE(review): radio codes jump from 0x30 to 0x6 here --
		 * presumably hardware-defined; do not "fix". */
		{110, 0x6},
		{104, 0x6},
		{98, 0x6},
		{110, 0x5},
		{104, 0x5},
		{98, 0x5},
		{110, 0x4},
		{104, 0x4},
		{98, 0x4},
		{110, 0x3},
		{104, 0x3},
		{98, 0x3},
		{110, 0x2},
		{104, 0x2},
		{98, 0x2},
		{110, 0x1},
		{104, 0x1},
		{98, 0x1},
		{110, 0x0},
		{104, 0x0},
		{98, 0x0},
		/* radio gain exhausted; continue with DSP attenuation only */
		{97, 0},
		{96, 0},
		{95, 0},
		{94, 0},
		{93, 0},
		{92, 0},
		{91, 0},
		{90, 0},
		{89, 0},
		{88, 0},
		{87, 0},
		{86, 0},
		{85, 0},
		{84, 0},
		{83, 0},
		{82, 0},
		{81, 0},
		{80, 0},
		{79, 0},
		{78, 0},
		{77, 0},
		{76, 0},
		{75, 0},
		{74, 0},
		{73, 0},
		{72, 0},
		{71, 0},
		{70, 0},
		{69, 0},
		{68, 0},
		{67, 0},
		{66, 0},
		{65, 0},
		{64, 0},
		{63, 0},
		{62, 0},
		{61, 0},
		{60, 0},
		{59, 0},
	}
};
2607
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002608static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
Zhu Yib481de92007-09-25 17:54:57 -07002609 u8 is_fat, u8 ctrl_chan_high,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002610 struct iwl4965_tx_power_db *tx_power_tbl)
Zhu Yib481de92007-09-25 17:54:57 -07002611{
2612 u8 saturation_power;
2613 s32 target_power;
2614 s32 user_target_power;
2615 s32 power_limit;
2616 s32 current_temp;
2617 s32 reg_limit;
2618 s32 current_regulatory;
2619 s32 txatten_grp = CALIB_CH_GROUP_MAX;
2620 int i;
2621 int c;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07002622 const struct iwl_channel_info *ch_info = NULL;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002623 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info;
2624 const struct iwl4965_eeprom_calib_measure *measurement;
Zhu Yib481de92007-09-25 17:54:57 -07002625 s16 voltage;
2626 s32 init_voltage;
2627 s32 voltage_compensation;
2628 s32 degrees_per_05db_num;
2629 s32 degrees_per_05db_denom;
2630 s32 factory_temp;
2631 s32 temperature_comp[2];
2632 s32 factory_gain_index[2];
2633 s32 factory_actual_pwr[2];
2634 s32 power_index;
2635
2636 /* Sanity check requested level (dBm) */
2637 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2638 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2639 priv->user_txpower_limit);
2640 return -EINVAL;
2641 }
2642 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2643 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2644 priv->user_txpower_limit);
2645 return -EINVAL;
2646 }
2647
2648 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2649 * are used for indexing into txpower table) */
2650 user_target_power = 2 * priv->user_txpower_limit;
2651
2652 /* Get current (RXON) channel, band, width */
2653 ch_info =
Johannes Berg8318d782008-01-24 19:38:38 +01002654 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07002655
2656 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2657 is_fat);
2658
2659 if (!ch_info)
2660 return -EINVAL;
2661
2662 /* get txatten group, used to select 1) thermal txpower adjustment
2663 * and 2) mimo txpower balance between Tx chains. */
2664 txatten_grp = iwl4965_get_tx_atten_grp(channel);
2665 if (txatten_grp < 0)
2666 return -EINVAL;
2667
2668 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
2669 channel, txatten_grp);
2670
2671 if (is_fat) {
2672 if (ctrl_chan_high)
2673 channel -= 2;
2674 else
2675 channel += 2;
2676 }
2677
2678 /* hardware txpower limits ...
2679 * saturation (clipping distortion) txpowers are in half-dBm */
2680 if (band)
2681 saturation_power = priv->eeprom.calib_info.saturation_power24;
2682 else
2683 saturation_power = priv->eeprom.calib_info.saturation_power52;
2684
2685 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2686 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
2687 if (band)
2688 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
2689 else
2690 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
2691 }
2692
2693 /* regulatory txpower limits ... reg_limit values are in half-dBm,
2694 * max_power_avg values are in dBm, convert * 2 */
2695 if (is_fat)
2696 reg_limit = ch_info->fat_max_power_avg * 2;
2697 else
2698 reg_limit = ch_info->max_power_avg * 2;
2699
2700 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
2701 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
2702 if (band)
2703 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
2704 else
2705 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
2706 }
2707
2708 /* Interpolate txpower calibration values for this channel,
2709 * based on factory calibration tests on spaced channels. */
2710 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2711
2712 /* calculate tx gain adjustment based on power supply voltage */
2713 voltage = priv->eeprom.calib_info.voltage;
2714 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2715 voltage_compensation =
2716 iwl4965_get_voltage_compensation(voltage, init_voltage);
2717
2718 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
2719 init_voltage,
2720 voltage, voltage_compensation);
2721
2722 /* get current temperature (Celsius) */
2723 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
2724 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
2725 current_temp = KELVIN_TO_CELSIUS(current_temp);
2726
2727 /* select thermal txpower adjustment params, based on channel group
2728 * (same frequency group used for mimo txatten adjustment) */
2729 degrees_per_05db_num =
2730 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
2731 degrees_per_05db_denom =
2732 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
2733
2734 /* get per-chain txpower values from factory measurements */
2735 for (c = 0; c < 2; c++) {
2736 measurement = &ch_eeprom_info.measurements[c][1];
2737
2738 /* txgain adjustment (in half-dB steps) based on difference
2739 * between factory and current temperature */
2740 factory_temp = measurement->temperature;
2741 iwl4965_math_div_round((current_temp - factory_temp) *
2742 degrees_per_05db_denom,
2743 degrees_per_05db_num,
2744 &temperature_comp[c]);
2745
2746 factory_gain_index[c] = measurement->gain_idx;
2747 factory_actual_pwr[c] = measurement->actual_pow;
2748
2749 IWL_DEBUG_TXPOWER("chain = %d\n", c);
2750 IWL_DEBUG_TXPOWER("fctry tmp %d, "
2751 "curr tmp %d, comp %d steps\n",
2752 factory_temp, current_temp,
2753 temperature_comp[c]);
2754
2755 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
2756 factory_gain_index[c],
2757 factory_actual_pwr[c]);
2758 }
2759
2760 /* for each of 33 bit-rates (including 1 for CCK) */
2761 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
2762 u8 is_mimo_rate;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002763 union iwl4965_tx_power_dual_stream tx_power;
Zhu Yib481de92007-09-25 17:54:57 -07002764
2765 /* for mimo, reduce each chain's txpower by half
2766 * (3dB, 6 steps), so total output power is regulatory
2767 * compliant. */
2768 if (i & 0x8) {
2769 current_regulatory = reg_limit -
2770 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
2771 is_mimo_rate = 1;
2772 } else {
2773 current_regulatory = reg_limit;
2774 is_mimo_rate = 0;
2775 }
2776
2777 /* find txpower limit, either hardware or regulatory */
2778 power_limit = saturation_power - back_off_table[i];
2779 if (power_limit > current_regulatory)
2780 power_limit = current_regulatory;
2781
2782 /* reduce user's txpower request if necessary
2783 * for this rate on this channel */
2784 target_power = user_target_power;
2785 if (target_power > power_limit)
2786 target_power = power_limit;
2787
2788 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
2789 i, saturation_power - back_off_table[i],
2790 current_regulatory, user_target_power,
2791 target_power);
2792
2793 /* for each of 2 Tx chains (radio transmitters) */
2794 for (c = 0; c < 2; c++) {
2795 s32 atten_value;
2796
2797 if (is_mimo_rate)
2798 atten_value =
2799 (s32)le32_to_cpu(priv->card_alive_init.
2800 tx_atten[txatten_grp][c]);
2801 else
2802 atten_value = 0;
2803
2804 /* calculate index; higher index means lower txpower */
2805 power_index = (u8) (factory_gain_index[c] -
2806 (target_power -
2807 factory_actual_pwr[c]) -
2808 temperature_comp[c] -
2809 voltage_compensation +
2810 atten_value);
2811
2812/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
2813 power_index); */
2814
2815 if (power_index < get_min_power_index(i, band))
2816 power_index = get_min_power_index(i, band);
2817
2818 /* adjust 5 GHz index to support negative indexes */
2819 if (!band)
2820 power_index += 9;
2821
2822 /* CCK, rate 32, reduce txpower for CCK */
2823 if (i == POWER_TABLE_CCK_ENTRY)
2824 power_index +=
2825 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
2826
2827 /* stay within the table! */
2828 if (power_index > 107) {
2829 IWL_WARNING("txpower index %d > 107\n",
2830 power_index);
2831 power_index = 107;
2832 }
2833 if (power_index < 0) {
2834 IWL_WARNING("txpower index %d < 0\n",
2835 power_index);
2836 power_index = 0;
2837 }
2838
2839 /* fill txpower command for this rate/chain */
2840 tx_power.s.radio_tx_gain[c] =
2841 gain_table[band][power_index].radio;
2842 tx_power.s.dsp_predis_atten[c] =
2843 gain_table[band][power_index].dsp;
2844
2845 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
2846 "gain 0x%02x dsp %d\n",
2847 c, atten_value, power_index,
2848 tx_power.s.radio_tx_gain[c],
2849 tx_power.s.dsp_predis_atten[c]);
2850 }/* for each chain */
2851
2852 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
2853
2854 }/* for each rate */
2855
2856 return 0;
2857}
2858
2859/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002860 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07002861 *
2862 * Uses the active RXON for channel, band, and characteristics (fat, high)
2863 * The power limit is taken from priv->user_txpower_limit.
2864 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002865int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002866{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002867 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07002868 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07002869 u8 band = 0;
2870 u8 is_fat = 0;
2871 u8 ctrl_chan_high = 0;
2872
2873 if (test_bit(STATUS_SCANNING, &priv->status)) {
2874 /* If this gets hit a lot, switch it to a BUG() and catch
2875 * the stack trace to find out who is calling this during
2876 * a scan. */
2877 IWL_WARNING("TX Power requested while scanning!\n");
2878 return -EAGAIN;
2879 }
2880
Johannes Berg8318d782008-01-24 19:38:38 +01002881 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07002882
2883 is_fat = is_fat_channel(priv->active_rxon.flags);
2884
2885 if (is_fat &&
2886 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2887 ctrl_chan_high = 1;
2888
2889 cmd.band = band;
2890 cmd.channel = priv->active_rxon.channel;
2891
Tomas Winkler857485c2008-03-21 13:53:44 -07002892 ret = iwl4965_fill_txpower_tbl(priv, band,
Zhu Yib481de92007-09-25 17:54:57 -07002893 le16_to_cpu(priv->active_rxon.channel),
2894 is_fat, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07002895 if (ret)
2896 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07002897
Tomas Winkler857485c2008-03-21 13:53:44 -07002898 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2899
2900out:
2901 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07002902}
2903
/* Build and send a REPLY_CHANNEL_SWITCH command to move to @channel,
 * including a freshly computed txpower table for the target channel. */
int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
{
	int rc;
	u8 band = 0;
	u8 is_fat = 0;
	u8 ctrl_chan_high = 0;
	struct iwl4965_channel_switch_cmd cmd = { 0 };
	const struct iwl_channel_info *ch_info;

	/* 1 = 2.4 GHz, 0 = 5.2 GHz */
	band = priv->band == IEEE80211_BAND_2GHZ;

	ch_info = iwl_get_channel_info(priv, priv->band, channel);

	is_fat = is_fat_channel(priv->staging_rxon.flags);

	/* NOTE(review): fat-ness is taken from staging_rxon but the
	 * control-channel position below from active_rxon -- confirm
	 * this mix of staging/active flags is intentional. */
	if (is_fat &&
	    (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
		ctrl_chan_high = 1;

	cmd.band = band;
	cmd.expect_beacon = 0;
	cmd.channel = cpu_to_le16(channel);
	cmd.rxon_flags = priv->active_rxon.flags;
	cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
	/* expect a beacon first on radar channels; if the channel is
	 * unknown, conservatively expect one as well */
	if (ch_info)
		cmd.expect_beacon = is_channel_radar(ch_info);
	else
		cmd.expect_beacon = 1;

	rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
				      ctrl_chan_high, &cmd.tx_power);
	if (rc) {
		IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
		return rc;
	}

	rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
	return rc;
}
2944
/* RTS retry limits: short under HCCA, generous otherwise */
#define RTS_HCCA_RETRY_LIMIT 3
#define RTS_DFAULT_RETRY_LIMIT 60

/* Fill the rate-related fields of a Tx command for one outgoing frame:
 * rate_n_flags, RTS/data retry limits, RTS->CTS conversion for
 * (re)association/auth frames, and A/B antenna alternation for
 * non-data frames. */
void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
				  struct iwl_cmd *cmd,
				  struct ieee80211_tx_control *ctrl,
				  struct ieee80211_hdr *hdr, int sta_id,
				  int is_hcca)
{
	struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u8 rate_plcp;
	u16 rate_flags = 0;
	/* clamp mac80211's rate index into the driver's rate table */
	int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);

	rate_plcp = iwl4965_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	/* CCK rates must carry the CCK flag in rate_n_flags */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;


	/* probe responses get a short retry budget; never retry RTS
	 * more often than the data frame itself */
	if (ieee80211_is_probe_response(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	/* module/debugfs override, if set */
	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;


	if (ieee80211_is_data(fc)) {
		/* data frames: STA_RATE flag set with initial index 0 --
		 * presumably lets uCode rate-scaling pick the rate; confirm
		 * against the REPLY_TX command documentation */
		tx->initial_rate_index = 0;
		tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_AUTH:
		case IEEE80211_STYPE_DEAUTH:
		case IEEE80211_STYPE_ASSOC_REQ:
		case IEEE80211_STYPE_REASSOC_REQ:
			/* for these management frames, replace a requested
			 * RTS/CTS exchange with CTS-to-self */
			if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		/* Alternate between antenna A and B for successive frames */
		if (priv->use_ant_b_for_management_frame) {
			priv->use_ant_b_for_management_frame = 0;
			rate_flags |= RATE_MCS_ANT_B_MSK;
		} else {
			priv->use_ant_b_for_management_frame = 1;
			rate_flags |= RATE_MCS_ANT_A_MSK;
		}
	}

	tx->rts_retry_limit = rts_retry_limit;
	tx->data_retry_limit = data_retry_limit;
	tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
3014
/* Read the uCode's "Rx buffer closed" index from the driver/device
 * shared DRAM area (rb_closed_stts_rb_num field). */
int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
{
	struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;

	return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num);
}
3021
/* Return the cached temperature reading (Kelvin; see
 * iwl4965_get_temperature which produces the calibrated value). */
int iwl4965_hw_get_temperature(struct iwl_priv *priv)
{
	return priv->temperature;
}
3026
/* Build a beacon Tx command (addressed to the broadcast address) inside
 * @frame->u.beacon at the given PLCP @rate.  Returns the total command
 * size: sizeof(tx_beacon_cmd) plus the beacon payload length. */
unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl4965_frame *frame, u8 rate)
{
	struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
	unsigned int frame_size;

	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	tx_beacon_cmd->tx.sta_id = priv->hw_setting.bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	frame_size = iwl4965_fill_beacon_frame(priv,
				tx_beacon_cmd->frame,
				iwl4965_broadcast_addr,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));

	BUG_ON(frame_size > MAX_MPDU_SIZE);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);

	/* NOTE(review): this test presumably selects the CCK rates
	 * (1M, plus 2M/5.5M/11M whose PLCP codes are >= 2M's while all
	 * OFDM PLCP codes fall below it) -- confirm against the
	 * iwl4965_rates PLCP values. */
	if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
		tx_beacon_cmd->tx.rate_n_flags =
			iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
	else
		tx_beacon_cmd->tx.rate_n_flags =
			iwl4965_hw_set_rate_n_flags(rate, 0);

	tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
				TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
	return (sizeof(*tx_beacon_cmd) + frame_size);
}
3058
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003059/*
3060 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
3061 * given Tx queue, and enable the DMA channel used for that queue.
3062 *
3063 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3064 * channels supported in hardware.
3065 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003066int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
Zhu Yib481de92007-09-25 17:54:57 -07003067{
3068 int rc;
3069 unsigned long flags;
3070 int txq_id = txq->q.id;
3071
3072 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003073 rc = iwl_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003074 if (rc) {
3075 spin_unlock_irqrestore(&priv->lock, flags);
3076 return rc;
3077 }
3078
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003079 /* Circular buffer (TFD queue in DRAM) physical base address */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003080 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
Zhu Yib481de92007-09-25 17:54:57 -07003081 txq->q.dma_addr >> 8);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003082
3083 /* Enable DMA channel, using same id as for TFD queue */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003084 iwl_write_direct32(
Zhu Yib481de92007-09-25 17:54:57 -07003085 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
3086 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3087 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003088 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003089 spin_unlock_irqrestore(&priv->lock, flags);
3090
3091 return 0;
3092}
3093
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003094int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
Zhu Yib481de92007-09-25 17:54:57 -07003095 dma_addr_t addr, u16 len)
3096{
3097 int index, is_odd;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003098 struct iwl4965_tfd_frame *tfd = ptr;
Zhu Yib481de92007-09-25 17:54:57 -07003099 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
3100
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003101 /* Each TFD can point to a maximum 20 Tx buffers */
Zhu Yib481de92007-09-25 17:54:57 -07003102 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
3103 IWL_ERROR("Error can not send more than %d chunks\n",
3104 MAX_NUM_OF_TBS);
3105 return -EINVAL;
3106 }
3107
3108 index = num_tbs / 2;
3109 is_odd = num_tbs & 0x1;
3110
3111 if (!is_odd) {
3112 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
3113 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
Tomas Winkler6a218f62008-01-14 17:46:15 -08003114 iwl_get_dma_hi_address(addr));
Zhu Yib481de92007-09-25 17:54:57 -07003115 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
3116 } else {
3117 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
3118 (u32) (addr & 0xffff));
3119 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
3120 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
3121 }
3122
3123 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
3124
3125 return 0;
3126}
3127
/* Log the board revision and PBA number read from EEPROM. */
static void iwl4965_hw_card_show_info(struct iwl_priv *priv)
{
	u16 hw_version = priv->eeprom.board_revision_4965;

	/* NOTE(review): the first two printed fields are the low and high
	 * nibbles of the same byte (hw_version >> 8) -- confirm the
	 * intended major/minor ordering against the EEPROM layout. */
	IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
		       ((hw_version >> 8) & 0x0F),
		       ((hw_version >> 8) >> 4), (hw_version & 0x00FF));

	IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
		       priv->eeprom.board_pba_number_4965);
}
3139
/* Extra bytes accounted per frame in the scheduler byte-count table */
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl4965_tx_queue_update_wr_ptr - Set up entry in Tx byte-count array
 *
 * Stores the frame's total length (payload + CRC + delimiter) at the
 * queue's write pointer in the byte-count table kept in the driver/uCode
 * shared area.  Always returns 0.
 */
int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv,
				   struct iwl4965_tx_queue *txq, u16 byte_cnt)
{
	int len;
	int txq_id = txq->q.id;
	struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;

	/* nothing pending for this queue */
	if (txq->need_update == 0)
		return 0;

	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	/* Set up byte count within first 256 entries */
	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);

	/* If within first 64 entries, duplicate at end */
	if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE)
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
			byte_cnt, len);

	return 0;
}
3170
/**
 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwl4965_set_rxon_chain(struct iwl_priv *priv)
{
	u8 is_single = is_single_stream(priv);
	u8 idle_state, rx_state;

	priv->staging_rxon.rx_chain = 0;
	rx_state = idle_state = 3;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl4965_noise_calibration()
	 * checks which antennas actually *are* connected. */
	priv->staging_rxon.rx_chain |=
	    cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);

	/* How many receivers should we use? */
	iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
	priv->staging_rxon.rx_chain |=
	    cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
	priv->staging_rxon.rx_chain |=
	    cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);

	/* Force MIMO Rx when multiple streams are possible, at least two
	 * receivers are active, and the device is not in power-save (PMI) */
	if (!is_single && (rx_state >= 2) &&
	    !test_bit(STATUS_POWER_PMI, &priv->status))
		priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
}
3207
Zhu Yib481de92007-09-25 17:54:57 -07003208/**
3209 * sign_extend - Sign extend a value using specified bit as sign-bit
3210 *
3211 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
3212 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
3213 *
3214 * @param oper value to sign extend
3215 * @param index 0 based bit index (0<=index<32) to sign bit
3216 */
3217static s32 sign_extend(u32 oper, int index)
3218{
3219 u8 shift = 31 - index;
3220
3221 return (s32)(oper << shift) >> shift;
3222}
3223
/**
 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
 * @priv: supplies the uCode calibration constants (card_alive_init
 *	therm_r1..r4) and the latest statistics temperature reading
 *
 * A return of <0 indicates bogus data in the statistics
 */
int iwl4965_get_temperature(const struct iwl_priv *priv)
{
	s32 temperature;
	s32 vt;
	s32 R1, R2, R3;
	u32 R4;

	/* FAT (40 MHz) operation uses the second set of calibration
	 * constants; otherwise use the first set. */
	if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
		(priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
		IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
		R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
	} else {
		IWL_DEBUG_TEMP("Running temperature calibration\n");
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
		R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
	}

	/*
	 * Temperature is only 23 bits, so sign extend out to 32.
	 *
	 * NOTE If we haven't received a statistics notification yet
	 * with an updated temperature, use R4 provided to us in the
	 * "initialize" ALIVE response.
	 */
	if (!test_bit(STATUS_TEMPERATURE, &priv->status))
		vt = sign_extend(R4, 23);
	else
		vt = sign_extend(
			le32_to_cpu(priv->statistics.general.temperature), 23);

	IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
		       R1, R2, R3, vt);

	/* guard the division below */
	if (R3 == R1) {
		IWL_ERROR("Calibration conflict R1 == R3\n");
		return -1;
	}

	/* Calculate temperature in degrees Kelvin, adjust by 97%.
	 * Add offset to center the adjustment around 0 degrees Centigrade. */
	temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
	temperature /= (R3 - R1);
	temperature = (temperature * 97) / 100 +
	    TEMPERATURE_CALIB_KELVIN_OFFSET;

	IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
			KELVIN_TO_CELSIUS(temperature));

	return temperature;
}
3285
3286/* Adjust Txpower only if temperature variance is greater than threshold. */
3287#define IWL_TEMPERATURE_THRESHOLD 3
3288
3289/**
3290 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
3291 *
 * If the temperature has changed sufficiently, then a recalibration
3293 * is needed.
3294 *
3295 * Assumes caller will replace priv->last_temperature once calibration
3296 * executed.
3297 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003298static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003299{
3300 int temp_diff;
3301
3302 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
3303 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
3304 return 0;
3305 }
3306
3307 temp_diff = priv->temperature - priv->last_temperature;
3308
3309 /* get absolute value */
3310 if (temp_diff < 0) {
3311 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
3312 temp_diff = -temp_diff;
3313 } else if (temp_diff == 0)
3314 IWL_DEBUG_POWER("Same temp, \n");
3315 else
3316 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
3317
3318 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
3319 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
3320 return 0;
3321 }
3322
3323 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
3324
3325 return 1;
3326}
3327
3328/* Calculate noise level, based on measurements during network silence just
3329 * before arriving beacon. This measurement can be done only if we know
3330 * exactly when to expect beacons, therefore only when we're associated. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003331static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003332{
3333 struct statistics_rx_non_phy *rx_info
3334 = &(priv->statistics.rx.general);
3335 int num_active_rx = 0;
3336 int total_silence = 0;
3337 int bcn_silence_a =
3338 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3339 int bcn_silence_b =
3340 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3341 int bcn_silence_c =
3342 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3343
3344 if (bcn_silence_a) {
3345 total_silence += bcn_silence_a;
3346 num_active_rx++;
3347 }
3348 if (bcn_silence_b) {
3349 total_silence += bcn_silence_b;
3350 num_active_rx++;
3351 }
3352 if (bcn_silence_c) {
3353 total_silence += bcn_silence_c;
3354 num_active_rx++;
3355 }
3356
3357 /* Average among active antennas */
3358 if (num_active_rx)
3359 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3360 else
3361 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3362
3363 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3364 bcn_silence_a, bcn_silence_b, bcn_silence_c,
3365 priv->last_rx_noise);
3366}
3367
/*
 * iwl4965_hw_rx_statistics - handle a statistics notification from uCode
 *
 * Caches the statistics block into priv->statistics, re-arms the periodic
 * statistics timer, recomputes the noise estimate when not scanning, and
 * recalculates the calibrated temperature when the reported values changed.
 */
void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
{
	struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
	int change;
	s32 temp;

	IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
		     (int)sizeof(priv->statistics), pkt->len);

	/* Recompute temperature only when the reported temperature or the
	 * FAT (wide-channel) mode flag differs from the cached copy */
	change = ((priv->statistics.general.temperature !=
		   pkt->u.stats.general.temperature) ||
		  ((priv->statistics.flag &
		    STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));

	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	/* Noise is only meaningful from real (non-scan) statistics */
	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl4965_rx_calc_noise(priv);
#ifdef CONFIG_IWL4965_SENSITIVITY
		queue_work(priv->workqueue, &priv->sensitivity_work);
#endif
	}

	iwl_leds_background(priv);

	/* If the hardware hasn't reported a change in
	 * temperature then don't bother computing a
	 * calibrated temperature value */
	if (!change)
		return;

	/* iwl4965_get_temperature() returns a negative value on error */
	temp = iwl4965_get_temperature(priv);
	if (temp < 0)
		return;

	if (priv->temperature != temp) {
		if (priv->temperature)
			IWL_DEBUG_TEMP("Temperature changed "
				       "from %dC to %dC\n",
				       KELVIN_TO_CELSIUS(priv->temperature),
				       KELVIN_TO_CELSIUS(temp));
		else
			IWL_DEBUG_TEMP("Temperature "
				       "initialized to %dC\n",
				       KELVIN_TO_CELSIUS(temp));
	}

	priv->temperature = temp;
	set_bit(STATUS_TEMPERATURE, &priv->status);

	/* Kick off a txpower recalibration if the shift was large enough */
	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    iwl4965_is_temp_calib_needed(priv))
		queue_work(priv->workqueue, &priv->txpower_work);
}
3433
/*
 * iwl4965_add_radiotap - prepend a radiotap header to a received frame
 *
 * Pushes a fixed-layout radiotap header in front of the 802.11 header in
 * @skb and fills in TSFT, flags, rate, channel, dBm signal, dBm noise
 * (always 0 here) and antenna, then marks the frame RX_FLAG_RADIOTAP.
 * Fields are written with put_unaligned() because the pushed header is
 * packed and may not be naturally aligned within the skb.
 */
static void iwl4965_add_radiotap(struct iwl_priv *priv,
				 struct sk_buff *skb,
				 struct iwl4965_rx_phy_res *rx_start,
				 struct ieee80211_rx_status *stats,
				 u32 ampdu_status)
{
	s8 signal = stats->ssi;
	s8 noise = 0;		/* no per-frame noise measurement available */
	int rate = stats->rate_idx;
	u64 tsf = stats->mactime;
	__le16 antenna;
	__le16 phy_flags_hw = rx_start->phy_flags;
	struct iwl4965_rt_rx_hdr {
		struct ieee80211_radiotap_header rt_hdr;
		__le64 rt_tsf;		/* TSF */
		u8 rt_flags;		/* radiotap packet flags */
		u8 rt_rate;		/* rate in 500kb/s */
		__le16 rt_channelMHz;	/* channel in MHz */
		__le16 rt_chbitmask;	/* channel bitfield */
		s8 rt_dbmsignal;	/* signal in dBm, kluged to signed */
		s8 rt_dbmnoise;
		u8 rt_antenna;		/* antenna number */
	} __attribute__ ((packed)) *iwl4965_rt;

	/* TODO: We won't have enough headroom for HT frames. Fix it later. */
	if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
		if (net_ratelimit())
			printk(KERN_ERR "not enough headroom [%d] for "
			       "radiotap head [%zd]\n",
			       skb_headroom(skb), sizeof(*iwl4965_rt));
		return;
	}

	/* put radiotap header in front of 802.11 header and data */
	iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));

	/* initialise radiotap header */
	iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	iwl4965_rt->rt_hdr.it_pad = 0;

	/* total header + data */
	put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
		      &iwl4965_rt->rt_hdr.it_len);

	/* Indicate all the fields we add to the radiotap header */
	put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
				  (1 << IEEE80211_RADIOTAP_FLAGS) |
				  (1 << IEEE80211_RADIOTAP_RATE) |
				  (1 << IEEE80211_RADIOTAP_CHANNEL) |
				  (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
				  (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
				  (1 << IEEE80211_RADIOTAP_ANTENNA)),
		      &iwl4965_rt->rt_hdr.it_present);

	/* Zero the flags, we'll add to them as we go */
	iwl4965_rt->rt_flags = 0;

	put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);

	iwl4965_rt->rt_dbmsignal = signal;
	iwl4965_rt->rt_dbmnoise = noise;

	/* Convert the channel frequency and set the flags */
	put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
	if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
					  IEEE80211_CHAN_5GHZ),
			      &iwl4965_rt->rt_chbitmask);
	else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
					  IEEE80211_CHAN_2GHZ),
			      &iwl4965_rt->rt_chbitmask);
	else	/* 802.11g */
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
					  IEEE80211_CHAN_2GHZ),
			      &iwl4965_rt->rt_chbitmask);

	/* rate_idx of -1 means "unknown rate" */
	if (rate == -1)
		iwl4965_rt->rt_rate = 0;
	else
		iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bitfield. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favour of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
	iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;

	/* set the preamble flag if appropriate */
	if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	stats->flag |= RX_FLAG_RADIOTAP;
}
3538
Tomas Winkler19758be2008-03-12 16:58:51 -07003539static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
3540{
3541 /* 0 - mgmt, 1 - cnt, 2 - data */
3542 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
3543 priv->rx_stats[idx].cnt++;
3544 priv->rx_stats[idx].bytes += len;
3545}
3546
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07003547static u32 iwl4965_translate_rx_status(u32 decrypt_in)
3548{
3549 u32 decrypt_out = 0;
3550
3551 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
3552 RX_RES_STATUS_STATION_FOUND)
3553 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
3554 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
3555
3556 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
3557
3558 /* packet was not encrypted */
3559 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3560 RX_RES_STATUS_SEC_TYPE_NONE)
3561 return decrypt_out;
3562
3563 /* packet was encrypted with unknown alg */
3564 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3565 RX_RES_STATUS_SEC_TYPE_ERR)
3566 return decrypt_out;
3567
3568 /* decryption was not done in HW */
3569 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
3570 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
3571 return decrypt_out;
3572
3573 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
3574
3575 case RX_RES_STATUS_SEC_TYPE_CCMP:
3576 /* alg is CCM: check MIC only */
3577 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
3578 /* Bad MIC */
3579 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3580 else
3581 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3582
3583 break;
3584
3585 case RX_RES_STATUS_SEC_TYPE_TKIP:
3586 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
3587 /* Bad TTAK */
3588 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
3589 break;
3590 }
3591 /* fall through if TTAK OK */
3592 default:
3593 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
3594 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3595 else
3596 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3597 break;
3598 };
3599
3600 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
3601 decrypt_in, decrypt_out);
3602
3603 return decrypt_out;
3604}
3605
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003606static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
Zhu Yib481de92007-09-25 17:54:57 -07003607 int include_phy,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003608 struct iwl4965_rx_mem_buffer *rxb,
Zhu Yib481de92007-09-25 17:54:57 -07003609 struct ieee80211_rx_status *stats)
3610{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003611 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003612 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3613 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3614 struct ieee80211_hdr *hdr;
3615 u16 len;
3616 __le32 *rx_end;
3617 unsigned int skblen;
3618 u32 ampdu_status;
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07003619 u32 ampdu_status_legacy;
Zhu Yib481de92007-09-25 17:54:57 -07003620
3621 if (!include_phy && priv->last_phy_res[0])
3622 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3623
3624 if (!rx_start) {
3625 IWL_ERROR("MPDU frame without a PHY data\n");
3626 return;
3627 }
3628 if (include_phy) {
3629 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
3630 rx_start->cfg_phy_cnt);
3631
3632 len = le16_to_cpu(rx_start->byte_count);
3633
3634 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
3635 sizeof(struct iwl4965_rx_phy_res) +
3636 rx_start->cfg_phy_cnt + len);
3637
3638 } else {
3639 struct iwl4965_rx_mpdu_res_start *amsdu =
3640 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3641
3642 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
3643 sizeof(struct iwl4965_rx_mpdu_res_start));
3644 len = le16_to_cpu(amsdu->byte_count);
3645 rx_start->byte_count = amsdu->byte_count;
3646 rx_end = (__le32 *) (((u8 *) hdr) + len);
3647 }
Ron Rindjunsky9ee1ba42007-11-26 16:14:42 +02003648 if (len > priv->hw_setting.max_pkt_size || len < 16) {
Zhu Yi12342c42007-12-20 11:27:32 +08003649 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
Zhu Yib481de92007-09-25 17:54:57 -07003650 return;
3651 }
3652
3653 ampdu_status = le32_to_cpu(*rx_end);
3654 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3655
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07003656 if (!include_phy) {
3657 /* New status scheme, need to translate */
3658 ampdu_status_legacy = ampdu_status;
3659 ampdu_status = iwl4965_translate_rx_status(ampdu_status);
3660 }
3661
Zhu Yib481de92007-09-25 17:54:57 -07003662 /* start from MAC */
3663 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3664 skb_put(rxb->skb, len); /* end where data ends */
3665
3666 /* We only process data packets if the interface is open */
3667 if (unlikely(!priv->is_open)) {
3668 IWL_DEBUG_DROP_LIMIT
3669 ("Dropping packet while interface is not open.\n");
3670 return;
3671 }
3672
Zhu Yib481de92007-09-25 17:54:57 -07003673 stats->flag = 0;
3674 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3675
Assaf Krauss1ea87392008-03-18 14:57:50 -07003676 if (priv->cfg->mod_params->hw_crypto)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003677 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
Zhu Yib481de92007-09-25 17:54:57 -07003678
Zhu Yi12342c42007-12-20 11:27:32 +08003679 if (priv->add_radiotap)
3680 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
3681
Tomas Winkler19758be2008-03-12 16:58:51 -07003682 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
Zhu Yib481de92007-09-25 17:54:57 -07003683 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3684 priv->alloc_rxb_skb--;
3685 rxb->skb = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07003686}
3687
3688/* Calc max signal level (dBm) among 3 possible receivers */
3689static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3690{
3691 /* data from PHY/DSP regarding signal strength, etc.,
3692 * contents are always there, not configurable by host. */
3693 struct iwl4965_rx_non_cfg_phy *ncphy =
3694 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3695 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3696 >> IWL_AGC_DB_POS;
3697
3698 u32 valid_antennae =
3699 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3700 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3701 u8 max_rssi = 0;
3702 u32 i;
3703
3704 /* Find max rssi among 3 possible receivers.
3705 * These values are measured by the digital signal processor (DSP).
3706 * They should stay fairly constant even as the signal strength varies,
3707 * if the radio's automatic gain control (AGC) is working right.
3708 * AGC value (see below) will provide the "interesting" info. */
3709 for (i = 0; i < 3; i++)
3710 if (valid_antennae & (1 << i))
3711 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3712
3713 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3714 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3715 max_rssi, agc);
3716
3717 /* dBm = max_rssi dB - agc dB - constant.
3718 * Higher AGC (higher radio gain) means lower signal. */
3719 return (max_rssi - agc - IWL_RSSI_OFFSET);
3720}
3721
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003722#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07003723
Assaf Krauss1ea87392008-03-18 14:57:50 -07003724void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
3725 struct ieee80211_ht_info *ht_info,
Tomas Winkler78330fd2008-02-06 02:37:18 +02003726 enum ieee80211_band band)
Ron Rindjunsky326eeee2007-11-26 16:14:37 +02003727{
3728 ht_info->cap = 0;
3729 memset(ht_info->supp_mcs_set, 0, 16);
3730
3731 ht_info->ht_supported = 1;
3732
Tomas Winkler78330fd2008-02-06 02:37:18 +02003733 if (band == IEEE80211_BAND_5GHZ) {
Ron Rindjunsky326eeee2007-11-26 16:14:37 +02003734 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3735 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3736 ht_info->supp_mcs_set[4] = 0x01;
3737 }
3738 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3739 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3740 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3741 (IWL_MIMO_PS_NONE << 2));
Assaf Krauss1ea87392008-03-18 14:57:50 -07003742
3743 if (priv->cfg->mod_params->amsdu_size_8K)
Ron Rindjunsky9ee1ba42007-11-26 16:14:42 +02003744 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
Ron Rindjunsky326eeee2007-11-26 16:14:37 +02003745
3746 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3747 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3748
3749 ht_info->supp_mcs_set[0] = 0xFF;
3750 ht_info->supp_mcs_set[1] = 0xFF;
3751}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003752#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07003753
/*
 * iwl4965_sta_modify_ps_wake - clear the power-save flag for a station
 *
 * Updates the local station entry under sta_lock so that the uCode
 * station command clears STA_FLG_PWR_SAVE_MSK, then sends the modified
 * station entry to the uCode asynchronously.
 */
static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	/* Clear only the power-save flag: flags_msk selects which
	 * station_flags bits the uCode should apply */
	priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.sta.modify_mask = 0;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* CMD_ASYNC: may be called from non-sleepable context */
	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
3767
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003768static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
Zhu Yib481de92007-09-25 17:54:57 -07003769{
3770 /* FIXME: need locking over ps_status ??? */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003771 u8 sta_id = iwl4965_hw_find_station(priv, addr);
Zhu Yib481de92007-09-25 17:54:57 -07003772
3773 if (sta_id != IWL_INVALID_STATION) {
3774 u8 sta_awake = priv->stations[sta_id].
3775 ps_status == STA_PS_STATUS_WAKE;
3776
3777 if (sta_awake && ps_bit)
3778 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
3779 else if (!sta_awake && !ps_bit) {
3780 iwl4965_sta_modify_ps_wake(priv, sta_id);
3781 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
3782 }
3783 }
3784}
Tomas Winkler0a6857e2008-03-12 16:58:49 -07003785#ifdef CONFIG_IWLWIFI_DEBUG
Tomas Winkler17744ff2008-03-02 01:52:00 +02003786
3787/**
3788 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
3789 *
3790 * You may hack this function to show different aspects of received frames,
3791 * including selective frame dumps.
3792 * group100 parameter selects whether to show 1 out of 100 good frames.
3793 *
3794 * TODO: This was originally written for 3945, need to audit for
3795 * proper operation with 4965.
3796 */
static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
		      struct iwl4965_rx_packet *pkt,
		      struct ieee80211_hdr *header, int group100)
{
	u32 to_us;
	u32 print_summary = 0;
	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
	u32 hundred = 0;
	u32 dataframe = 0;
	u16 fc;
	u16 seq_ctl;
	u16 channel;
	u16 phy_flags;
	int rate_sym;
	u16 length;
	u16 status;
	u16 bcn_tmr;
	u32 tsf_low;
	u64 tsf;
	u8 rssi;
	u8 agc;
	u16 sig_avg;
	u16 noise_diff;
	/* NOTE(review): these IWL_RX_* accessor macros and frame structs
	 * originate from the 3945 layout -- see the TODO in the function's
	 * kernel-doc about auditing for 4965 */
	struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
	struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u8 *data = IWL_RX_DATA(pkt);

	/* Do nothing unless RX debugging is enabled */
	if (likely(!(iwl_debug_level & IWL_DL_RX)))
		return;

	/* MAC header */
	fc = le16_to_cpu(header->frame_control);
	seq_ctl = le16_to_cpu(header->seq_ctrl);

	/* metadata */
	channel = le16_to_cpu(rx_hdr->channel);
	phy_flags = le16_to_cpu(rx_hdr->phy_flags);
	rate_sym = rx_hdr->rate;
	length = le16_to_cpu(rx_hdr->len);

	/* end-of-frame status and timestamp */
	status = le32_to_cpu(rx_end->status);
	bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
	tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
	tsf = le64_to_cpu(rx_end->timestamp);

	/* signal statistics */
	rssi = rx_stats->rssi;
	agc = rx_stats->agc;
	sig_avg = le16_to_cpu(rx_stats->sig_avg);
	noise_diff = le16_to_cpu(rx_stats->noise_diff);

	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);

	/* if data frame is to us and all is good,
	 * (optionally) print summary for only 1 out of every 100 */
	if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
	    (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
		dataframe = 1;
		if (!group100)
			print_summary = 1;	/* print each frame */
		else if (priv->framecnt_to_us < 100) {
			priv->framecnt_to_us++;
			print_summary = 0;
		} else {
			priv->framecnt_to_us = 0;
			print_summary = 1;
			hundred = 1;
		}
	} else {
		/* print summary for all other frames */
		print_summary = 1;
	}

	if (print_summary) {
		char *title;
		int rate_idx;
		u32 bitrate;

		/* Classify the frame for a short human-readable tag;
		 * probe responses and beacons also get a hex dump */
		if (hundred)
			title = "100Frames";
		else if (fc & IEEE80211_FCTL_RETRY)
			title = "Retry";
		else if (ieee80211_is_assoc_response(fc))
			title = "AscRsp";
		else if (ieee80211_is_reassoc_response(fc))
			title = "RasRsp";
		else if (ieee80211_is_probe_response(fc)) {
			title = "PrbRsp";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_beacon(fc)) {
			title = "Beacon";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_atim(fc))
			title = "ATIM";
		else if (ieee80211_is_auth(fc))
			title = "Auth";
		else if (ieee80211_is_deauth(fc))
			title = "DeAuth";
		else if (ieee80211_is_disassoc(fc))
			title = "DisAssoc";
		else
			title = "Frame";

		rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
		if (unlikely(rate_idx == -1))
			bitrate = 0;
		else
			bitrate = iwl4965_rates[rate_idx].ieee / 2;

		/* print frame summary.
		 * MAC addresses show just the last byte (for brevity),
		 * but you can hack it to show more, if you'd like to. */
		if (dataframe)
			IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
				     "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
				     title, fc, header->addr1[5],
				     length, rssi, channel, bitrate);
		else {
			/* src/dst addresses assume managed mode */
			IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
				     "src=0x%02x, rssi=%u, tim=%lu usec, "
				     "phy=0x%02x, chnl=%d\n",
				     title, fc, header->addr1[5],
				     header->addr3[5], rssi,
				     tsf_low - priv->scan_start_tsf,
				     phy_flags, channel);
		}
	}
	if (print_dump)
		iwl_print_hex_dump(IWL_DL_RX, data, length);
}
3930#else
/* Stub used when CONFIG_IWLWIFI_DEBUG is not set: per-frame debug
 * reporting compiles away to nothing. */
static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
					    struct iwl4965_rx_packet *pkt,
					    struct ieee80211_hdr *header,
					    int group100)
{
}
3937#endif
3938
Zhu Yib481de92007-09-25 17:54:57 -07003939
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08003940
Tomas Winkler857485c2008-03-21 13:53:44 -07003941/* Called for REPLY_RX (legacy ABG frames), or
Zhu Yib481de92007-09-25 17:54:57 -07003942 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003943static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003944 struct iwl4965_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003945{
Tomas Winkler17744ff2008-03-02 01:52:00 +02003946 struct ieee80211_hdr *header;
3947 struct ieee80211_rx_status rx_status;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003948 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003949 /* Use phy data (Rx signal strength, etc.) contained within
3950 * this rx packet for legacy frames,
3951 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
Tomas Winkler857485c2008-03-21 13:53:44 -07003952 int include_phy = (pkt->hdr.cmd == REPLY_RX);
Zhu Yib481de92007-09-25 17:54:57 -07003953 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3954 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3955 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3956 __le32 *rx_end;
3957 unsigned int len = 0;
Zhu Yib481de92007-09-25 17:54:57 -07003958 u16 fc;
Zhu Yib481de92007-09-25 17:54:57 -07003959 u8 network_packet;
3960
Tomas Winkler17744ff2008-03-02 01:52:00 +02003961 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
Tomas Winklerdc92e492008-04-03 16:05:22 -07003962 rx_status.freq =
3963 ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
Tomas Winkler17744ff2008-03-02 01:52:00 +02003964 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3965 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
Tomas Winklerdc92e492008-04-03 16:05:22 -07003966 rx_status.rate_idx =
3967 iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
Tomas Winkler17744ff2008-03-02 01:52:00 +02003968 if (rx_status.band == IEEE80211_BAND_5GHZ)
3969 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3970
3971 rx_status.antenna = 0;
3972 rx_status.flag = 0;
3973
Zhu Yib481de92007-09-25 17:54:57 -07003974 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
Tomas Winklerdc92e492008-04-03 16:05:22 -07003975 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n",
3976 rx_start->cfg_phy_cnt);
Zhu Yib481de92007-09-25 17:54:57 -07003977 return;
3978 }
Tomas Winkler17744ff2008-03-02 01:52:00 +02003979
Zhu Yib481de92007-09-25 17:54:57 -07003980 if (!include_phy) {
3981 if (priv->last_phy_res[0])
3982 rx_start = (struct iwl4965_rx_phy_res *)
3983 &priv->last_phy_res[1];
3984 else
3985 rx_start = NULL;
3986 }
3987
3988 if (!rx_start) {
3989 IWL_ERROR("MPDU frame without a PHY data\n");
3990 return;
3991 }
3992
3993 if (include_phy) {
3994 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3995 + rx_start->cfg_phy_cnt);
3996
3997 len = le16_to_cpu(rx_start->byte_count);
Tomas Winkler17744ff2008-03-02 01:52:00 +02003998 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
Zhu Yib481de92007-09-25 17:54:57 -07003999 sizeof(struct iwl4965_rx_phy_res) + len);
4000 } else {
4001 struct iwl4965_rx_mpdu_res_start *amsdu =
4002 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
4003
4004 header = (void *)(pkt->u.raw +
4005 sizeof(struct iwl4965_rx_mpdu_res_start));
4006 len = le16_to_cpu(amsdu->byte_count);
4007 rx_end = (__le32 *) (pkt->u.raw +
4008 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
4009 }
4010
4011 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
4012 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
4013 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
4014 le32_to_cpu(*rx_end));
4015 return;
4016 }
4017
4018 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
4019
Zhu Yib481de92007-09-25 17:54:57 -07004020 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
Tomas Winkler17744ff2008-03-02 01:52:00 +02004021 rx_status.ssi = iwl4965_calc_rssi(rx_start);
Zhu Yib481de92007-09-25 17:54:57 -07004022
4023 /* Meaningful noise values are available only from beacon statistics,
4024 * which are gathered only when associated, and indicate noise
4025 * only for the associated network channel ...
4026 * Ignore these noise values while scanning (other channels) */
Tomas Winkler3109ece2008-03-28 16:33:35 -07004027 if (iwl_is_associated(priv) &&
Zhu Yib481de92007-09-25 17:54:57 -07004028 !test_bit(STATUS_SCANNING, &priv->status)) {
Tomas Winkler17744ff2008-03-02 01:52:00 +02004029 rx_status.noise = priv->last_rx_noise;
4030 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
4031 rx_status.noise);
Zhu Yib481de92007-09-25 17:54:57 -07004032 } else {
Tomas Winkler17744ff2008-03-02 01:52:00 +02004033 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4034 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
Zhu Yib481de92007-09-25 17:54:57 -07004035 }
4036
4037 /* Reset beacon noise level if not associated. */
Tomas Winkler3109ece2008-03-28 16:33:35 -07004038 if (!iwl_is_associated(priv))
Zhu Yib481de92007-09-25 17:54:57 -07004039 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4040
Tomas Winkler17744ff2008-03-02 01:52:00 +02004041 /* Set "1" to report good data frames in groups of 100 */
4042 /* FIXME: need to optimze the call: */
4043 iwl4965_dbg_report_frame(priv, pkt, header, 1);
Zhu Yib481de92007-09-25 17:54:57 -07004044
Tomas Winkler17744ff2008-03-02 01:52:00 +02004045 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
4046 rx_status.ssi, rx_status.noise, rx_status.signal,
John W. Linville06501d22008-04-01 17:38:47 -04004047 (unsigned long long)rx_status.mactime);
Zhu Yib481de92007-09-25 17:54:57 -07004048
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004049 network_packet = iwl4965_is_network_packet(priv, header);
Zhu Yib481de92007-09-25 17:54:57 -07004050 if (network_packet) {
Tomas Winkler17744ff2008-03-02 01:52:00 +02004051 priv->last_rx_rssi = rx_status.ssi;
Zhu Yib481de92007-09-25 17:54:57 -07004052 priv->last_beacon_time = priv->ucode_beacon_time;
4053 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
4054 }
4055
4056 fc = le16_to_cpu(header->frame_control);
4057 switch (fc & IEEE80211_FCTL_FTYPE) {
4058 case IEEE80211_FTYPE_MGMT:
Zhu Yib481de92007-09-25 17:54:57 -07004059 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
4060 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
4061 header->addr2);
Tomas Winkler17744ff2008-03-02 01:52:00 +02004062 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07004063 break;
4064
4065 case IEEE80211_FTYPE_CTL:
Ron Rindjunsky9ab46172007-12-25 17:00:38 +02004066#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07004067 switch (fc & IEEE80211_FCTL_STYPE) {
4068 case IEEE80211_STYPE_BACK_REQ:
4069 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
4070 iwl4965_handle_data_packet(priv, 0, include_phy,
Tomas Winkler17744ff2008-03-02 01:52:00 +02004071 rxb, &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07004072 break;
4073 default:
4074 break;
4075 }
4076#endif
Zhu Yib481de92007-09-25 17:54:57 -07004077 break;
4078
Joe Perches0795af52007-10-03 17:59:30 -07004079 case IEEE80211_FTYPE_DATA: {
4080 DECLARE_MAC_BUF(mac1);
4081 DECLARE_MAC_BUF(mac2);
4082 DECLARE_MAC_BUF(mac3);
4083
Zhu Yib481de92007-09-25 17:54:57 -07004084 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
4085 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
4086 header->addr2);
4087
4088 if (unlikely(!network_packet))
4089 IWL_DEBUG_DROP("Dropping (non network): "
Joe Perches0795af52007-10-03 17:59:30 -07004090 "%s, %s, %s\n",
4091 print_mac(mac1, header->addr1),
4092 print_mac(mac2, header->addr2),
4093 print_mac(mac3, header->addr3));
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004094 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
Joe Perches0795af52007-10-03 17:59:30 -07004095 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
4096 print_mac(mac1, header->addr1),
4097 print_mac(mac2, header->addr2),
4098 print_mac(mac3, header->addr3));
Zhu Yib481de92007-09-25 17:54:57 -07004099 else
4100 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
Tomas Winkler17744ff2008-03-02 01:52:00 +02004101 &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07004102 break;
Joe Perches0795af52007-10-03 17:59:30 -07004103 }
Zhu Yib481de92007-09-25 17:54:57 -07004104 default:
4105 break;
4106
4107 }
4108}
4109
4110/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4111 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004112static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004113 struct iwl4965_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07004114{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004115 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07004116 priv->last_phy_res[0] = 1;
4117 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4118 sizeof(struct iwl4965_rx_phy_res));
4119}
static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
				struct iwl4965_rx_mem_buffer *rxb)

{
#ifdef CONFIG_IWL4965_SENSITIVITY
	struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
	struct iwl4965_missed_beacon_notif *bcn_notif =
						&pkt->u.missed_beacon;

	/* Ignore short beacon gaps; only a longer run of missed beacons
	 * (more than 5 in a row) warrants a sensitivity re-calibration. */
	if (le32_to_cpu(bcn_notif->consequtive_missed_beacons) <= 5)
		return;

	IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		le32_to_cpu(bcn_notif->consequtive_missed_beacons),
		le32_to_cpu(bcn_notif->total_missed_becons),
		le32_to_cpu(bcn_notif->num_recvd_beacons),
		le32_to_cpu(bcn_notif->num_expected_beacons));

	/* Request re-init of sensitivity calibration, but not while a
	 * scan is running (scanning would disturb the measurements). */
	priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
		queue_work(priv->workqueue, &priv->sensitivity_work);
#endif /*CONFIG_IWL4965_SENSITIVITY*/
}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004141#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07004142
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004143/**
4144 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4145 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004146static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07004147 int sta_id, int tid)
4148{
4149 unsigned long flags;
4150
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004151 /* Remove "disable" flag, to enable Tx for this TID */
Zhu Yib481de92007-09-25 17:54:57 -07004152 spin_lock_irqsave(&priv->sta_lock, flags);
4153 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4154 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4155 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4156 spin_unlock_irqrestore(&priv->sta_lock, flags);
4157
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004158 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07004159}
4160
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004161/**
4162 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4163 *
4164 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4165 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4166 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004167static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004168 struct iwl4965_ht_agg *agg,
4169 struct iwl4965_compressed_ba_resp*
Zhu Yib481de92007-09-25 17:54:57 -07004170 ba_resp)
4171
4172{
4173 int i, sh, ack;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004174 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4175 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4176 u64 bitmap;
4177 int successes = 0;
4178 struct ieee80211_tx_status *tx_status;
Zhu Yib481de92007-09-25 17:54:57 -07004179
4180 if (unlikely(!agg->wait_for_ba)) {
4181 IWL_ERROR("Received BA when not expected\n");
4182 return -EINVAL;
4183 }
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004184
4185 /* Mark that the expected block-ack response arrived */
Zhu Yib481de92007-09-25 17:54:57 -07004186 agg->wait_for_ba = 0;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004187 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004188
4189 /* Calculate shift to align block-ack bits with our Tx window bits */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004190 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
Ian Schram01ebd062007-10-25 17:15:22 +08004191 if (sh < 0) /* tbw something is wrong with indices */
Zhu Yib481de92007-09-25 17:54:57 -07004192 sh += 0x100;
4193
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004194 /* don't use 64-bit values for now */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004195 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
Zhu Yib481de92007-09-25 17:54:57 -07004196
4197 if (agg->frame_count > (64 - sh)) {
4198 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
4199 return -1;
4200 }
4201
4202 /* check for success or failure according to the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004203 * transmitted bitmap and block-ack bitmap */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004204 bitmap &= agg->bitmap;
Zhu Yib481de92007-09-25 17:54:57 -07004205
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004206 /* For each frame attempted in aggregation,
4207 * update driver's record of tx frame's status. */
Zhu Yib481de92007-09-25 17:54:57 -07004208 for (i = 0; i < agg->frame_count ; i++) {
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004209 ack = bitmap & (1 << i);
4210 successes += !!ack;
Zhu Yib481de92007-09-25 17:54:57 -07004211 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004212 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4213 agg->start_idx + i);
Zhu Yib481de92007-09-25 17:54:57 -07004214 }
4215
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004216 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4217 tx_status->flags = IEEE80211_TX_STATUS_ACK;
Ron Rindjunsky99556432008-01-28 14:07:25 +02004218 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
4219 tx_status->ampdu_ack_map = successes;
4220 tx_status->ampdu_ack_len = agg->frame_count;
Ron Rindjunsky4c424e42008-03-04 18:09:27 -08004221 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
4222 &tx_status->control);
Zhu Yib481de92007-09-25 17:54:57 -07004223
John W. Linvillef868f4e2008-03-07 16:38:43 -05004224 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004225
4226 return 0;
4227}
4228
4229/**
4230 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4231 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004232static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004233 u16 txq_id)
4234{
4235 /* Simply stop the queue, but don't change any configuration;
4236 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004237 iwl_write_prph(priv,
Tomas Winkler12a81f62008-04-03 16:05:20 -07004238 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004239 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4240 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4241}
4242
4243/**
4244 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004245 * priv->lock must be held by the caller
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004246 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004247static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004248 u16 ssn_idx, u8 tx_fifo)
4249{
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004250 int ret = 0;
4251
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004252 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4253 IWL_WARNING("queue number too small: %d, must be > %d\n",
4254 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4255 return -EINVAL;
4256 }
4257
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004258 ret = iwl_grab_nic_access(priv);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004259 if (ret)
4260 return ret;
4261
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004262 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4263
Tomas Winkler12a81f62008-04-03 16:05:20 -07004264 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004265
4266 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4267 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4268 /* supposes that ssn_idx is valid (!= 0xFFF) */
4269 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4270
Tomas Winkler12a81f62008-04-03 16:05:20 -07004271 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004272 iwl4965_txq_ctx_deactivate(priv, txq_id);
4273 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4274
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004275 iwl_release_nic_access(priv);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004276
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004277 return 0;
4278}
4279
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004280int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004281 u8 tid, int txq_id)
4282{
4283 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4284 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4285 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4286
4287 switch (priv->stations[sta_id].tid[tid].agg.state) {
4288 case IWL_EMPTYING_HW_QUEUE_DELBA:
4289 /* We are reclaiming the last packet of the */
4290 /* aggregated HW queue */
4291 if (txq_id == tid_data->agg.txq_id &&
4292 q->read_ptr == q->write_ptr) {
4293 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4294 int tx_fifo = default_tid_to_tx_fifo[tid];
4295 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4296 iwl4965_tx_queue_agg_disable(priv, txq_id,
4297 ssn, tx_fifo);
4298 tid_data->agg.state = IWL_AGG_OFF;
4299 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4300 }
4301 break;
4302 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4303 /* We are reclaiming the last packet of the queue */
4304 if (tid_data->tfds_in_queue == 0) {
4305 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4306 tid_data->agg.state = IWL_AGG_ON;
4307 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4308 }
4309 break;
4310 }
Zhu Yib481de92007-09-25 17:54:57 -07004311 return 0;
4312}
4313
/**
 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
 * @index -- current index
 * @n_bd -- total number of entries in queue (s/b power of 2)
 */
static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
{
	if (index == 0)
		return n_bd - 1;
	return index - 1;
}
4323
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004324/**
4325 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4326 *
4327 * Handles block-acknowledge notification from device, which reports success
4328 * of frames sent via aggregation.
4329 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004330static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004331 struct iwl4965_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07004332{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004333 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4334 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
Zhu Yib481de92007-09-25 17:54:57 -07004335 int index;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004336 struct iwl4965_tx_queue *txq = NULL;
4337 struct iwl4965_ht_agg *agg;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004338 DECLARE_MAC_BUF(mac);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004339
4340 /* "flow" corresponds to Tx queue */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004341 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004342
4343 /* "ssn" is start of block-ack Tx window, corresponds to index
4344 * (in Tx queue's circular buffer) of first TFD/frame in window */
Zhu Yib481de92007-09-25 17:54:57 -07004345 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4346
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004347 if (scd_flow >= ARRAY_SIZE(priv->txq)) {
Zhu Yib481de92007-09-25 17:54:57 -07004348 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
4349 return;
4350 }
4351
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004352 txq = &priv->txq[scd_flow];
Zhu Yib481de92007-09-25 17:54:57 -07004353 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004354
4355 /* Find index just before block-ack window */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004356 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
Zhu Yib481de92007-09-25 17:54:57 -07004357
Ian Schram01ebd062007-10-25 17:15:22 +08004358 /* TODO: Need to get this copy more safely - now good for debug */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004359
Joe Perches0795af52007-10-03 17:59:30 -07004360 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4361 "sta_id = %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07004362 agg->wait_for_ba,
Joe Perches0795af52007-10-03 17:59:30 -07004363 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
Zhu Yib481de92007-09-25 17:54:57 -07004364 ba_resp->sta_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004365 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
Zhu Yib481de92007-09-25 17:54:57 -07004366 "%d, scd_ssn = %d\n",
4367 ba_resp->tid,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004368 ba_resp->seq_ctl,
Tomas Winkler0310ae72008-03-11 16:17:19 -07004369 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
Zhu Yib481de92007-09-25 17:54:57 -07004370 ba_resp->scd_flow,
4371 ba_resp->scd_ssn);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004372 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
Zhu Yib481de92007-09-25 17:54:57 -07004373 agg->start_idx,
John W. Linvillef868f4e2008-03-07 16:38:43 -05004374 (unsigned long long)agg->bitmap);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004375
4376 /* Update driver's record of ACK vs. not for each frame in window */
Zhu Yib481de92007-09-25 17:54:57 -07004377 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004378
4379 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4380 * block-ack window (we assume that they've been successfully
4381 * transmitted ... if not, it's too late anyway). */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004382 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4383 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4384 priv->stations[ba_resp->sta_id].
4385 tid[ba_resp->tid].tfds_in_queue -= freed;
4386 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4387 priv->mac80211_registered &&
4388 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4389 ieee80211_wake_queue(priv->hw, scd_flow);
4390 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4391 ba_resp->tid, scd_flow);
4392 }
Zhu Yib481de92007-09-25 17:54:57 -07004393}
4394
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004395/**
4396 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4397 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004398static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07004399 u16 txq_id)
4400{
4401 u32 tbl_dw_addr;
4402 u32 tbl_dw;
4403 u16 scd_q2ratid;
4404
4405 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4406
4407 tbl_dw_addr = priv->scd_base_addr +
4408 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4409
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004410 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07004411
4412 if (txq_id & 0x1)
4413 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
4414 else
4415 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
4416
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004417 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07004418
4419 return 0;
4420}
4421
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004422
Zhu Yib481de92007-09-25 17:54:57 -07004423/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004424 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4425 *
4426 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
4427 * i.e. it must be one of the higher queues used for aggregation
Zhu Yib481de92007-09-25 17:54:57 -07004428 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004429static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
Zhu Yib481de92007-09-25 17:54:57 -07004430 int tx_fifo, int sta_id, int tid,
4431 u16 ssn_idx)
4432{
4433 unsigned long flags;
4434 int rc;
4435 u16 ra_tid;
4436
4437 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
4438 IWL_WARNING("queue number too small: %d, must be > %d\n",
4439 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4440
4441 ra_tid = BUILD_RAxTID(sta_id, tid);
4442
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004443 /* Modify device's station table to Tx this TID */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004444 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
Zhu Yib481de92007-09-25 17:54:57 -07004445
4446 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004447 rc = iwl_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004448 if (rc) {
4449 spin_unlock_irqrestore(&priv->lock, flags);
4450 return rc;
4451 }
4452
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004453 /* Stop this Tx queue before configuring it */
Zhu Yib481de92007-09-25 17:54:57 -07004454 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4455
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004456 /* Map receiver-address / traffic-ID to this queue */
Zhu Yib481de92007-09-25 17:54:57 -07004457 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4458
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004459 /* Set this queue as a chain-building queue */
Tomas Winkler12a81f62008-04-03 16:05:20 -07004460 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07004461
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004462 /* Place first TFD at index corresponding to start sequence number.
4463 * Assumes that ssn_idx is valid (!= 0xFFF) */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08004464 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4465 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
Zhu Yib481de92007-09-25 17:54:57 -07004466 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4467
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004468 /* Set up Tx window size and frame limit for this queue */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004469 iwl_write_targ_mem(priv,
Zhu Yib481de92007-09-25 17:54:57 -07004470 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4471 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4472 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4473
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004474 iwl_write_targ_mem(priv, priv->scd_base_addr +
Zhu Yib481de92007-09-25 17:54:57 -07004475 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4476 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4477 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4478
Tomas Winkler12a81f62008-04-03 16:05:20 -07004479 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07004480
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004481 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
Zhu Yib481de92007-09-25 17:54:57 -07004482 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4483
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004484 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004485 spin_unlock_irqrestore(&priv->lock, flags);
4486
4487 return 0;
4488}
4489
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004490#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07004491
4492/**
4493 * iwl4965_add_station - Initialize a station's hardware rate table
4494 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004495 * The uCode's station table contains a table of fallback rates
Zhu Yib481de92007-09-25 17:54:57 -07004496 * for automatic fallback during transmission.
4497 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004498 * NOTE: This sets up a default set of values. These will be replaced later
4499 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4500 * rc80211_simple.
Zhu Yib481de92007-09-25 17:54:57 -07004501 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004502 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4503 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4504 * which requires station table entry to exist).
Zhu Yib481de92007-09-25 17:54:57 -07004505 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004506void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
Zhu Yib481de92007-09-25 17:54:57 -07004507{
4508 int i, r;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004509 struct iwl4965_link_quality_cmd link_cmd = {
Zhu Yib481de92007-09-25 17:54:57 -07004510 .reserved1 = 0,
4511 };
4512 u16 rate_flags;
4513
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004514 /* Set up the rate scaling to start at selected rate, fall back
4515 * all the way down to 1M in IEEE order, and then spin on 1M */
Zhu Yib481de92007-09-25 17:54:57 -07004516 if (is_ap)
4517 r = IWL_RATE_54M_INDEX;
Johannes Berg8318d782008-01-24 19:38:38 +01004518 else if (priv->band == IEEE80211_BAND_5GHZ)
Zhu Yib481de92007-09-25 17:54:57 -07004519 r = IWL_RATE_6M_INDEX;
4520 else
4521 r = IWL_RATE_1M_INDEX;
4522
4523 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4524 rate_flags = 0;
4525 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4526 rate_flags |= RATE_MCS_CCK_MSK;
4527
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004528 /* Use Tx antenna B only */
Zhu Yib481de92007-09-25 17:54:57 -07004529 rate_flags |= RATE_MCS_ANT_B_MSK;
4530 rate_flags &= ~RATE_MCS_ANT_A_MSK;
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004531
Zhu Yib481de92007-09-25 17:54:57 -07004532 link_cmd.rs_table[i].rate_n_flags =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004533 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4534 r = iwl4965_get_prev_ieee_rate(r);
Zhu Yib481de92007-09-25 17:54:57 -07004535 }
4536
4537 link_cmd.general_params.single_stream_ant_msk = 2;
4538 link_cmd.general_params.dual_stream_ant_msk = 3;
4539 link_cmd.agg_params.agg_dis_start_th = 3;
4540 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4541
4542 /* Update the rate scaling for control frame Tx to AP */
Tomas Winklera4062b82008-03-11 16:17:16 -07004543 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_setting.bcast_sta_id;
Zhu Yib481de92007-09-25 17:54:57 -07004544
Tomas Winklere5472972008-03-28 16:21:12 -07004545 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
4546 sizeof(link_cmd), &link_cmd, NULL);
Zhu Yib481de92007-09-25 17:54:57 -07004547}
4548
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004549#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07004550
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004551static u8 iwl4965_is_channel_extension(struct iwl_priv *priv,
Johannes Berg8318d782008-01-24 19:38:38 +01004552 enum ieee80211_band band,
Tomas Winkler78330fd2008-02-06 02:37:18 +02004553 u16 channel, u8 extension_chan_offset)
Zhu Yib481de92007-09-25 17:54:57 -07004554{
Assaf Kraussbf85ea42008-03-14 10:38:49 -07004555 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07004556
Assaf Krauss8622e702008-03-21 13:53:43 -07004557 ch_info = iwl_get_channel_info(priv, band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07004558 if (!is_channel_valid(ch_info))
4559 return 0;
4560
Guy Cohen134eb5d2008-03-04 18:09:25 -08004561 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
Zhu Yib481de92007-09-25 17:54:57 -07004562 return 0;
4563
4564 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4565 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4566 return 1;
4567
4568 return 0;
4569}
4570
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004571static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv,
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004572 struct ieee80211_ht_info *sta_ht_inf)
Zhu Yib481de92007-09-25 17:54:57 -07004573{
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004574 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
Zhu Yib481de92007-09-25 17:54:57 -07004575
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004576 if ((!iwl_ht_conf->is_ht) ||
4577 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
Guy Cohen134eb5d2008-03-04 18:09:25 -08004578 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
Zhu Yib481de92007-09-25 17:54:57 -07004579 return 0;
4580
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004581 if (sta_ht_inf) {
4582 if ((!sta_ht_inf->ht_supported) ||
Roel Kluin194c7ca2008-02-02 20:48:48 +01004583 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004584 return 0;
4585 }
Zhu Yib481de92007-09-25 17:54:57 -07004586
Tomas Winkler78330fd2008-02-06 02:37:18 +02004587 return (iwl4965_is_channel_extension(priv, priv->band,
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004588 iwl_ht_conf->control_channel,
4589 iwl_ht_conf->extension_chan_offset));
Zhu Yib481de92007-09-25 17:54:57 -07004590}
4591
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004592void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
Zhu Yib481de92007-09-25 17:54:57 -07004593{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004594 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
Zhu Yib481de92007-09-25 17:54:57 -07004595 u32 val;
4596
4597 if (!ht_info->is_ht)
4598 return;
4599
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004600 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004601 if (iwl4965_is_fat_tx_allowed(priv, NULL))
Zhu Yib481de92007-09-25 17:54:57 -07004602 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4603 else
4604 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4605 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4606
4607 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4608 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
4609 le16_to_cpu(rxon->channel),
4610 ht_info->control_channel);
4611 rxon->channel = cpu_to_le16(ht_info->control_channel);
4612 return;
4613 }
4614
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004615 /* Note: control channel is opposite of extension channel */
Zhu Yib481de92007-09-25 17:54:57 -07004616 switch (ht_info->extension_chan_offset) {
4617 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4618 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4619 break;
4620 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4621 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4622 break;
Guy Cohen134eb5d2008-03-04 18:09:25 -08004623 case IWL_EXT_CHANNEL_OFFSET_NONE:
Zhu Yib481de92007-09-25 17:54:57 -07004624 default:
4625 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4626 break;
4627 }
4628
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004629 val = ht_info->ht_protection;
Zhu Yib481de92007-09-25 17:54:57 -07004630
4631 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4632
Zhu Yib481de92007-09-25 17:54:57 -07004633 iwl4965_set_rxon_chain(priv);
4634
4635 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4636 "rxon flags 0x%X operation mode :0x%X "
4637 "extension channel offset 0x%x "
4638 "control chan %d\n",
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004639 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4640 le32_to_cpu(rxon->flags), ht_info->ht_protection,
Zhu Yib481de92007-09-25 17:54:57 -07004641 ht_info->extension_chan_offset,
4642 ht_info->control_channel);
4643 return;
4644}
4645
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004646void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004647 struct ieee80211_ht_info *sta_ht_inf)
Zhu Yib481de92007-09-25 17:54:57 -07004648{
4649 __le32 sta_flags;
Tomas Winklere53cfe02008-01-30 22:05:13 -08004650 u8 mimo_ps_mode;
Zhu Yib481de92007-09-25 17:54:57 -07004651
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004652 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
Zhu Yib481de92007-09-25 17:54:57 -07004653 goto done;
4654
Tomas Winklere53cfe02008-01-30 22:05:13 -08004655 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4656
Zhu Yib481de92007-09-25 17:54:57 -07004657 sta_flags = priv->stations[index].sta.station_flags;
4658
Tomas Winklere53cfe02008-01-30 22:05:13 -08004659 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4660
4661 switch (mimo_ps_mode) {
4662 case WLAN_HT_CAP_MIMO_PS_STATIC:
4663 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4664 break;
4665 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
Zhu Yib481de92007-09-25 17:54:57 -07004666 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
Tomas Winklere53cfe02008-01-30 22:05:13 -08004667 break;
4668 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4669 break;
4670 default:
4671 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
4672 break;
4673 }
Zhu Yib481de92007-09-25 17:54:57 -07004674
4675 sta_flags |= cpu_to_le32(
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004676 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
Zhu Yib481de92007-09-25 17:54:57 -07004677
4678 sta_flags |= cpu_to_le32(
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004679 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
Zhu Yib481de92007-09-25 17:54:57 -07004680
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004681 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
Zhu Yib481de92007-09-25 17:54:57 -07004682 sta_flags |= STA_FLG_FAT_EN_MSK;
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004683 else
Tomas Winklere53cfe02008-01-30 22:05:13 -08004684 sta_flags &= ~STA_FLG_FAT_EN_MSK;
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004685
Zhu Yib481de92007-09-25 17:54:57 -07004686 priv->stations[index].sta.station_flags = sta_flags;
4687 done:
4688 return;
4689}
4690
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004691static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07004692 int sta_id, int tid, u16 ssn)
4693{
4694 unsigned long flags;
4695
4696 spin_lock_irqsave(&priv->sta_lock, flags);
4697 priv->stations[sta_id].sta.station_flags_msk = 0;
4698 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
4699 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
4700 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
4701 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4702 spin_unlock_irqrestore(&priv->sta_lock, flags);
4703
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004704 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07004705}
4706
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004707static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07004708 int sta_id, int tid)
4709{
4710 unsigned long flags;
4711
4712 spin_lock_irqsave(&priv->sta_lock, flags);
4713 priv->stations[sta_id].sta.station_flags_msk = 0;
4714 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
4715 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
4716 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4717 spin_unlock_irqrestore(&priv->sta_lock, flags);
4718
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004719 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07004720}
4721
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004722/*
4723 * Find first available (lowest unused) Tx Queue, mark it "active".
4724 * Called only when finding queue for aggregation.
4725 * Should never return anything < 7, because they should already
4726 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4727 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004728static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004729{
4730 int txq_id;
4731
4732 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
4733 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4734 return txq_id;
4735 return -1;
4736}
4737
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004738static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4739 u16 tid, u16 *start_seq_num)
Zhu Yib481de92007-09-25 17:54:57 -07004740{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004741 struct iwl_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07004742 int sta_id;
4743 int tx_fifo;
4744 int txq_id;
4745 int ssn = -1;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004746 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07004747 unsigned long flags;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004748 struct iwl4965_tid_data *tid_data;
Joe Perches0795af52007-10-03 17:59:30 -07004749 DECLARE_MAC_BUF(mac);
Zhu Yib481de92007-09-25 17:54:57 -07004750
4751 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4752 tx_fifo = default_tid_to_tx_fifo[tid];
4753 else
4754 return -EINVAL;
4755
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004756 IWL_WARNING("%s on da = %s tid = %d\n",
4757 __func__, print_mac(mac, da), tid);
Zhu Yib481de92007-09-25 17:54:57 -07004758
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004759 sta_id = iwl4965_hw_find_station(priv, da);
Zhu Yib481de92007-09-25 17:54:57 -07004760 if (sta_id == IWL_INVALID_STATION)
4761 return -ENXIO;
4762
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004763 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
4764 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
4765 return -ENXIO;
4766 }
4767
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004768 txq_id = iwl4965_txq_ctx_activate_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004769 if (txq_id == -1)
4770 return -ENXIO;
4771
4772 spin_lock_irqsave(&priv->sta_lock, flags);
4773 tid_data = &priv->stations[sta_id].tid[tid];
4774 ssn = SEQ_TO_SN(tid_data->seq_number);
4775 tid_data->agg.txq_id = txq_id;
4776 spin_unlock_irqrestore(&priv->sta_lock, flags);
4777
4778 *start_seq_num = ssn;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004779 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4780 sta_id, tid, ssn);
4781 if (ret)
4782 return ret;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004783
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004784 ret = 0;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004785 if (tid_data->tfds_in_queue == 0) {
4786 printk(KERN_ERR "HW queue is empty\n");
4787 tid_data->agg.state = IWL_AGG_ON;
4788 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4789 } else {
4790 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4791 tid_data->tfds_in_queue);
4792 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4793 }
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004794 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07004795}
4796
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004797static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da,
4798 u16 tid)
Zhu Yib481de92007-09-25 17:54:57 -07004799{
4800
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004801 struct iwl_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07004802 int tx_fifo_id, txq_id, sta_id, ssn = -1;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004803 struct iwl4965_tid_data *tid_data;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004804 int ret, write_ptr, read_ptr;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004805 unsigned long flags;
Joe Perches0795af52007-10-03 17:59:30 -07004806 DECLARE_MAC_BUF(mac);
4807
Zhu Yib481de92007-09-25 17:54:57 -07004808 if (!da) {
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004809 IWL_ERROR("da = NULL\n");
Zhu Yib481de92007-09-25 17:54:57 -07004810 return -EINVAL;
4811 }
4812
4813 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4814 tx_fifo_id = default_tid_to_tx_fifo[tid];
4815 else
4816 return -EINVAL;
4817
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004818 sta_id = iwl4965_hw_find_station(priv, da);
Zhu Yib481de92007-09-25 17:54:57 -07004819
4820 if (sta_id == IWL_INVALID_STATION)
4821 return -ENXIO;
4822
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004823 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4824 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4825
Zhu Yib481de92007-09-25 17:54:57 -07004826 tid_data = &priv->stations[sta_id].tid[tid];
4827 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4828 txq_id = tid_data->agg.txq_id;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004829 write_ptr = priv->txq[txq_id].q.write_ptr;
4830 read_ptr = priv->txq[txq_id].q.read_ptr;
Zhu Yib481de92007-09-25 17:54:57 -07004831
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004832 /* The queue is not empty */
4833 if (write_ptr != read_ptr) {
4834 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
4835 priv->stations[sta_id].tid[tid].agg.state =
4836 IWL_EMPTYING_HW_QUEUE_DELBA;
4837 return 0;
4838 }
4839
4840 IWL_DEBUG_HT("HW queue empty\n");;
4841 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
4842
4843 spin_lock_irqsave(&priv->lock, flags);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004844 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004845 spin_unlock_irqrestore(&priv->lock, flags);
4846
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08004847 if (ret)
4848 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07004849
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004850 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid);
4851
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004852 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004853 print_mac(mac, da), tid);
Zhu Yib481de92007-09-25 17:54:57 -07004854
4855 return 0;
4856}
4857
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02004858int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4859 enum ieee80211_ampdu_mlme_action action,
4860 const u8 *addr, u16 tid, u16 *ssn)
4861{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004862 struct iwl_priv *priv = hw->priv;
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02004863 int sta_id;
4864 DECLARE_MAC_BUF(mac);
4865
4866 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ",
4867 print_mac(mac, addr), tid);
4868 sta_id = iwl4965_hw_find_station(priv, addr);
4869 switch (action) {
4870 case IEEE80211_AMPDU_RX_START:
4871 IWL_DEBUG_HT("start Rx\n");
4872 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn);
4873 break;
4874 case IEEE80211_AMPDU_RX_STOP:
4875 IWL_DEBUG_HT("stop Rx\n");
4876 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4877 break;
4878 case IEEE80211_AMPDU_TX_START:
4879 IWL_DEBUG_HT("start Tx\n");
4880 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn);
4881 case IEEE80211_AMPDU_TX_STOP:
4882 IWL_DEBUG_HT("stop Tx\n");
4883 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid);
4884 default:
4885 IWL_DEBUG_HT("unknown\n");
4886 return -EINVAL;
4887 break;
4888 }
4889 return 0;
4890}
4891
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004892#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07004893
4894/* Set up 4965-specific Rx frame reply handlers */
/*
 * Register the 4965-specific Rx notification handlers in the
 * priv->rx_handlers dispatch table, keyed by reply/command ID.
 */
void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv)
{
	/* Legacy Rx frames */
	priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;

	/* High-throughput (HT) Rx frames */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;

	/* uCode notification that beacons were missed */
	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
		iwl4965_rx_missed_beacon_notif;

#ifdef CONFIG_IWL4965_HT
	/* Block-ack (compressed BA) replies for Tx aggregation */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
#endif /* CONFIG_IWL4965_HT */
}
4911
/*
 * Initialize 4965-specific deferred work items and the statistics
 * timer.  Counterparts are torn down in
 * iwl4965_hw_cancel_deferred_work().
 */
void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
	INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work);
#ifdef CONFIG_IWL4965_SENSITIVITY
	INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
#endif
	/* Periodic statistics timer: runs iwl4965_bg_statistics_periodic
	 * with priv as its argument.  Only initialized here; presumably
	 * armed elsewhere -- NOTE(review): confirm against the caller. */
	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
}
4923
/*
 * Stop 4965-specific deferred work.  The statistics timer is stopped
 * synchronously (del_timer_sync waits for a running handler).
 * NOTE(review): cancel_delayed_work() does not wait for an already
 * running init_alive_start handler -- confirm callers flush the
 * workqueue afterwards if that matters.
 */
void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv)
{
	del_timer_sync(&priv->statistics_periodic);

	cancel_delayed_work(&priv->init_alive_start);
}
4930
/* Host-command utility callbacks handed to iwlcore (command enqueue only). */
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
	.enqueue_hcmd = iwl4965_enqueue_hcmd,
};
4934
/* 4965 hardware-specific library callbacks used by iwlcore. */
static struct iwl_lib_ops iwl4965_lib = {
	.init_drv = iwl4965_init_drv,
	.hw_nic_init = iwl4965_hw_nic_init,
	.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
	.alive_notify = iwl4965_alive_notify,
	/* BSM (bootstrap state machine) loads the uCode on this device */
	.load_ucode = iwl4965_load_bsm,
	/* EEPROM access is delegated to the shared iwlcore helpers */
	.eeprom_ops = {
		.verify_signature = iwlcore_eeprom_verify_signature,
		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
		.release_semaphore = iwlcore_eeprom_release_semaphore,
	},
	.radio_kill_sw = iwl4965_radio_kill_sw,
};
4948
/* Top-level ops vector: bundles the lib and hcmd-utils callback tables. */
static struct iwl_ops iwl4965_ops = {
	.lib = &iwl4965_lib,
	.utils = &iwl4965_hcmd_utils,
};
4953
/* Device configuration for the 4965AGN (802.11a/g/n SKU). */
static struct iwl_cfg iwl4965_agn_cfg = {
	.name = "4965AGN",
	.fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl4965_ops,
	.mod_params = &iwl4965_mod_params,
};
4961
/* PCI device IDs claimed by this driver; both map to the 4965AGN config.
 * Non-static: referenced from the pci_driver registration elsewhere. */
struct pci_device_id iwl4965_hw_card_ids[] = {
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
	{0}	/* terminator */
};
4967
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004968MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
Assaf Krauss1ea87392008-03-18 14:57:50 -07004969
4970module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
4971MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4972module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
4973MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
4974module_param_named(hwcrypto, iwl4965_mod_params.hw_crypto, int, 0444);
4975MODULE_PARM_DESC(hwcrypto,
4976 "using hardware crypto engine (default 0 [software])\n");
4977module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
4978MODULE_PARM_DESC(debug, "debug output mask");
4979module_param_named(
4980 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
4981MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4982
4983module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
4984MODULE_PARM_DESC(queues_num, "number of hw queues.");
4985
4986/* QoS */
4987module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
4988MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
4989module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
4990MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
4991