/******************************************************************************
 *
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-5000-hw.h"

#define IWL5000_UCODE_API "-1"

static const u16 iwl5000_default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL50_CMD_FIFO_NUM,
	IWL_TX_FIFO_HCCA_1,
	IWL_TX_FIFO_HCCA_2
};

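/*
 * Bring up the device's basic functionality: apply the PCIe L0S/L1
 * work-arounds, configure the analog PLL, wait for the MAC clock to
 * stabilize, request the DMA clock and disable L1-Active.
 */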
static int iwl5000_apm_init(struct iwl_priv *priv)
{
	int ret = 0;

	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* disable L0S without affecting L1: don't wait for ICH L0S bug W/A */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO("Failed to init the card\n");
		return ret;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);

	return ret;
}

/* FIXME: this is identical to 4965 */
static void iwl5000_apm_stop(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl4965_hw_nic_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	spin_unlock_irqrestore(&priv->lock, flags);
}


static int iwl5000_apm_reset(struct iwl_priv *priv)
{
	int ret = 0;
	unsigned long flags;

	iwl4965_hw_nic_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* FIXME: put here L1A - L0S w/a */

	iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO("Failed to init the card\n");
		goto out;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);

out:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}


static void iwl5000_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u16 radio_cfg;
	u8 val_link;

	spin_lock_irqsave(&priv->lock, flags);

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* L1 is enabled by BIOS */
	if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
		/* L0S disabled, L1A enabled */
		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		/* L0S enabled, L1A disabled */
		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_5000_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	spin_unlock_irqrestore(&priv->lock, flags);
}


/*
 * EEPROM
 */
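/*
 * Translate an indirect EEPROM address into an absolute byte offset:
 * the "link" cell selected by the indirect type holds a word offset to
 * the target section, which is added to the in-section address.
 */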
static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
		break;
	case INDIRECT_GENERAL:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
		break;
	case INDIRECT_PROCESS_ADJST:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
		break;
	default:
		IWL_ERROR("illegal indirect type: 0x%X\n",
			  address & INDIRECT_TYPE_MSK);
		break;
	}

	/* translate the offset from words to bytes */
	return (address & ADDRESS_MSK) + (offset << 1);
}

static int iwl5000_eeprom_check_version(struct iwl_priv *priv)
{
	u16 eeprom_ver;
	struct iwl_eeprom_calib_hdr {
		u8 version;
		u8 pa_type;
		u16 voltage;
	} *hdr;

	eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_5000_CALIB_ALL);

	if (eeprom_ver < EEPROM_5000_EEPROM_VERSION ||
	    hdr->version < EEPROM_5000_TX_POWER_VERSION)
		goto err;

	return 0;
err:
	IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
		  eeprom_ver, EEPROM_5000_EEPROM_VERSION,
		  hdr->version, EEPROM_5000_TX_POWER_VERSION);
	return -EINVAL;

}

#ifdef CONFIG_IWL5000_RUN_TIME_CALIB

static void iwl5000_gain_computation(struct iwl_priv *priv,
		u32 average_noise[NUM_RX_CHAINS],
		u16 min_average_noise_antenna_i,
		u32 min_average_noise)
{
	int i;
	s32 delta_g;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	/* Find Gain Code for the antennas B and C */
	for (i = 1; i < NUM_RX_CHAINS; i++) {
		if ((data->disconn_array[i])) {
			data->delta_gain_code[i] = 0;
			continue;
		}
		delta_g = (1000 * ((s32)average_noise[0] -
			(s32)average_noise[i])) / 1500;
		/* bound gain to a 2-bit value; the 3rd bit is the sign */
		data->delta_gain_code[i] =
			min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

		if (delta_g < 0)
			/* set negative sign */
			data->delta_gain_code[i] |= (1 << 2);
	}

	IWL_DEBUG_CALIB("Delta gains: ANT_B = %d ANT_C = %d\n",
			data->delta_gain_code[1], data->delta_gain_code[2]);

	if (!data->radio_write) {
		struct iwl5000_calibration_chain_noise_gain_cmd cmd;
		memset(&cmd, 0, sizeof(cmd));

		cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
		cmd.delta_gain_1 = data->delta_gain_code[1];
		cmd.delta_gain_2 = data->delta_gain_code[2];
		iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
			sizeof(cmd), &cmd, NULL);

		data->radio_write = 1;
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}

	data->chain_noise_a = 0;
	data->chain_noise_b = 0;
	data->chain_noise_c = 0;
	data->chain_signal_a = 0;
	data->chain_signal_b = 0;
	data->chain_signal_c = 0;
	data->beacon_count = 0;
}


static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
		struct iwl5000_calibration_chain_noise_reset_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
		if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
					sizeof(cmd), &cmd))
			IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
	}
}

static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
	.min_nrg_cck = 95,
	.max_nrg_cck = 0,
	.auto_corr_min_ofdm = 90,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 120,
	.auto_corr_min_ofdm_mrc_x1 = 240,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 155,
	.auto_corr_max_ofdm_mrc_x1 = 290,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 170,
	.auto_corr_max_cck_mrc = 400,
	.nrg_th_cck = 95,
	.nrg_th_ofdm = 95,
};

#endif /* CONFIG_IWL5000_RUN_TIME_CALIB */

static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
					   size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->eeprom_size);
	return &priv->eeprom[address];
}

/*
 * ucode
 */
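/*
 * Kick off DMA of a single uCode section from host memory into device SRAM
 * over the FH service channel. The caller waits for ucode_write_complete
 * before loading the next section.
 */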
static int iwl5000_load_section(struct iwl_priv *priv,
				struct fw_desc *image,
				u32 dst_addr)
{
	int ret = 0;
	unsigned long flags;

	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(priv,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* FIXME: write the MSB of the phy_addr in CTRL1
	 * iwl_write_direct32(priv,
	 *	IWL_FH_TFDIB_CTRL1_REG(IWL_FH_SRVC_CHNL),
	 *	((phy_addr & MSB_MSK)
	 *	 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_count);
	 */
	iwl_write_direct32(priv,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), byte_cnt);
	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

static int iwl5000_load_given_ucode(struct iwl_priv *priv,
		struct fw_desc *inst_image,
		struct fw_desc *data_image)
{
	int ret = 0;

	ret = iwl5000_load_section(
		priv, inst_image, RTC_INST_LOWER_BOUND);
	if (ret)
		return ret;

	IWL_DEBUG_INFO("INST uCode section being loaded...\n");
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
				priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		IWL_ERROR("Could not load the INST uCode section due "
			"to interrupt\n");
		return ret;
	}
	if (!ret) {
		IWL_ERROR("Could not load the INST uCode section\n");
		return -ETIMEDOUT;
	}

	priv->ucode_write_complete = 0;

	ret = iwl5000_load_section(
		priv, data_image, RTC_DATA_LOWER_BOUND);
	if (ret)
		return ret;

	IWL_DEBUG_INFO("DATA uCode section being loaded...\n");

	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
				priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		IWL_ERROR("Could not load the DATA uCode section due "
			"to interrupt\n");
		return ret;
	} else if (!ret) {
		IWL_ERROR("Could not load the DATA uCode section\n");
		return -ETIMEDOUT;
	} else
		ret = 0;

	priv->ucode_write_complete = 0;

	return ret;
}

static int iwl5000_load_ucode(struct iwl_priv *priv)
{
	int ret = 0;

	/* check whether init ucode should be loaded, or rather runtime ucode */
	if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
		IWL_DEBUG_INFO("Init ucode found. Loading init ucode...\n");
		ret = iwl5000_load_given_ucode(priv,
			&priv->ucode_init, &priv->ucode_init_data);
		if (!ret) {
			IWL_DEBUG_INFO("Init ucode load complete.\n");
			priv->ucode_type = UCODE_INIT;
		}
	} else {
		IWL_DEBUG_INFO("Init ucode not found, or already loaded. "
			"Loading runtime ucode...\n");
		ret = iwl5000_load_given_ucode(priv,
			&priv->ucode_code, &priv->ucode_data);
		if (!ret) {
			IWL_DEBUG_INFO("Runtime ucode load complete.\n");
			priv->ucode_type = UCODE_RT;
		}
	}

	return ret;
}

static void iwl5000_init_alive_start(struct iwl_priv *priv)
{
	int ret = 0;

	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO("Initialize Alive failed.\n");
		goto restart;
	}

	/* initialize uCode was loaded... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded.  */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	iwlcore_clear_stations_table(priv);
	ret = priv->cfg->ops->lib->alive_notify(priv);
	if (ret) {
		IWL_WARNING("Could not complete ALIVE transition: %d\n", ret);
		goto restart;
	}

	return;

restart:
	/* real restart (first load init_ucode) */
	queue_work(priv->workqueue, &priv->restart);
}

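/* Set a Tx queue's hardware write pointer and scheduler read pointer */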
static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
				int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
}

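/*
 * (De)activate a Tx queue in the scheduler and bind it to a Tx FIFO;
 * scd_retry selects whether the queue carries aggregation (BA) traffic.
 */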
static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
			IWL50_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}

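/* Send a zeroed WiMAX coexistence priority table to the uCode */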
static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
{
	struct iwl_wimax_coex_cmd coex_cmd;

	memset(&coex_cmd, 0, sizeof(coex_cmd));

	return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
				sizeof(coex_cmd), &coex_cmd);
}

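/*
 * Called once the runtime uCode reports ALIVE: clear the scheduler's context
 * area in SRAM, point it at the byte-count tables, reset all queue pointers
 * and activate the default queue-to-FIFO mapping.
 */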
static int iwl5000_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	int i = 0;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
		(priv->shared_phys +
		 offsetof(struct iwl5000_shared, queues_byte_cnt_tbls)) >> 10);
	iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
		IWL50_SCD_QUEUECHAIN_SEL_ALL(
			priv->hw_params.max_txq_num));
	iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);

	/* initialize the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
			(1 << priv->hw_params.max_txq_num) - 1);

	iwl_write_prph(priv, IWL50_SCD_TXFACT,
			SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
	/* map qos queues to fifos one-to-one */
	for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
		int ac = iwl5000_default_queue_to_tx_fifo[i];
		iwl_txq_ctx_activate(priv, i);
		iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}
	/* TODO - need to initialize those FIFOs inside the loop above,
	 * not only mark them as active */
	iwl_txq_ctx_activate(priv, 4);
	iwl_txq_ctx_activate(priv, 7);
	iwl_txq_ctx_activate(priv, 8);
	iwl_txq_ctx_activate(priv, 9);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl5000_send_wimax_coex(priv);

	return 0;
}

static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
	if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
		IWL_ERROR("invalid queues_num, should be between %d and %d\n",
			  IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
		return -EINVAL;
	}

	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
	priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	if (priv->cfg->mod_params->amsdu_size_8K)
		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
	else
		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
	priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
	priv->hw_params.max_stations = IWL5000_STATION_COUNT;
	priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
	priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	priv->hw_params.fat_channel = BIT(IEEE80211_BAND_2GHZ) |
					BIT(IEEE80211_BAND_5GHZ);
#ifdef CONFIG_IWL5000_RUN_TIME_CALIB
	priv->hw_params.sens = &iwl5000_sensitivity;
#endif

	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
	case CSR_HW_REV_TYPE_5100:
	case CSR_HW_REV_TYPE_5150:
		priv->hw_params.tx_chains_num = 1;
		priv->hw_params.rx_chains_num = 2;
		/* FIXME: move to ANT_A, ANT_B, ANT_C enum */
		priv->hw_params.valid_tx_ant = ANT_A;
		priv->hw_params.valid_rx_ant = ANT_AB;
		break;
	case CSR_HW_REV_TYPE_5300:
	case CSR_HW_REV_TYPE_5350:
		priv->hw_params.tx_chains_num = 3;
		priv->hw_params.rx_chains_num = 3;
		priv->hw_params.valid_tx_ant = ANT_ABC;
		priv->hw_params.valid_rx_ant = ANT_ABC;
		break;
	}

	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
	case CSR_HW_REV_TYPE_5100:
	case CSR_HW_REV_TYPE_5300:
		/* 5X00 wants in Celsius */
		priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
		break;
	case CSR_HW_REV_TYPE_5150:
	case CSR_HW_REV_TYPE_5350:
		/* 5X50 wants in Kelvin */
		priv->hw_params.ct_kill_threshold =
				CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
		break;
	}

	return 0;
}

static int iwl5000_alloc_shared_mem(struct iwl_priv *priv)
{
	priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
					sizeof(struct iwl5000_shared),
					&priv->shared_phys);
	if (!priv->shared_virt)
		return -ENOMEM;

	memset(priv->shared_virt, 0, sizeof(struct iwl5000_shared));

	priv->rb_closed_offset = offsetof(struct iwl5000_shared, rb_closed);

	return 0;
}

static void iwl5000_free_shared_mem(struct iwl_priv *priv)
{
	if (priv->shared_virt)
		pci_free_consistent(priv->pci_dev,
				    sizeof(struct iwl5000_shared),
				    priv->shared_virt,
				    priv->shared_phys);
}

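/* Return the index of the last Rx buffer the device closed, from shared DRAM */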
static int iwl5000_shared_mem_rx_idx(struct iwl_priv *priv)
{
	struct iwl5000_shared *s = priv->shared_virt;
	return le32_to_cpu(s->rb_closed) & 0xFFF;
}

/**
 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					    struct iwl_tx_queue *txq,
					    u16 byte_cnt)
{
	struct iwl5000_shared *shared_data = priv->shared_virt;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta = 0;
	int len;

	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	if (txq_id != IWL_CMD_QUEUE_NUM) {
		sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
		case TX_CMD_SEC_CCM:
			len += CCMP_MIC_LEN;
			break;
		case TX_CMD_SEC_TKIP:
			len += TKIP_ICV_LEN;
			break;
		case TX_CMD_SEC_WEP:
			len += WEP_IV_LEN + WEP_ICV_LEN;
			break;
		}
	}

	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);

	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], sta_id, sta);

	if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
			byte_cnt, len);
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
			sta_id, sta);
	}
}

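/* 5000 uses the iwl_addsta_cmd layout unchanged, so just copy it verbatim */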
static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
{
	u16 size = (u16)sizeof(struct iwl_addsta_cmd);
	memcpy(data, cmd, size);
	return size;
}


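/* Stop all Tx DMA by clearing the scheduler's Tx FIFO active mask */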
static int iwl5000_disable_tx_fifo(struct iwl_priv *priv)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		IWL_ERROR("Tx fifo reset failed\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	iwl_write_prph(priv, IWL50_SCD_TXFACT, 0);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

/* Currently 5000 is the superset of everything */
static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
{
	return len;
}

static void iwl5000_rx_handler_setup(struct iwl_priv *priv)
{
}

static int iwl5000_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= RTC_DATA_LOWER_BOUND) &&
		(addr < IWL50_RTC_DATA_UPPER_BOUND);
}

static struct iwl_hcmd_ops iwl5000_hcmd = {
};

static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
	.get_hcmd_size = iwl5000_get_hcmd_size,
	.build_addsta_hcmd = iwl5000_build_addsta_hcmd,
#ifdef CONFIG_IWL5000_RUN_TIME_CALIB
	.gain_computation = iwl5000_gain_computation,
	.chain_noise_reset = iwl5000_chain_noise_reset,
#endif
};

static struct iwl_lib_ops iwl5000_lib = {
	.set_hw_params = iwl5000_hw_set_hw_params,
	.alloc_shared_mem = iwl5000_alloc_shared_mem,
	.free_shared_mem = iwl5000_free_shared_mem,
	.shared_mem_rx_idx = iwl5000_shared_mem_rx_idx,
	.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
	.disable_tx_fifo = iwl5000_disable_tx_fifo,
	.rx_handler_setup = iwl5000_rx_handler_setup,
	.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
	.load_ucode = iwl5000_load_ucode,
	.init_alive_start = iwl5000_init_alive_start,
	.alive_notify = iwl5000_alive_notify,
	.apm_ops = {
		.init = iwl5000_apm_init,
		.reset = iwl5000_apm_reset,
		.stop = iwl5000_apm_stop,
		.config = iwl5000_nic_config,
		.set_pwr_src = iwl4965_set_pwr_src,
	},
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_5000_REG_BAND_1_CHANNELS,
			EEPROM_5000_REG_BAND_2_CHANNELS,
			EEPROM_5000_REG_BAND_3_CHANNELS,
			EEPROM_5000_REG_BAND_4_CHANNELS,
			EEPROM_5000_REG_BAND_5_CHANNELS,
			EEPROM_5000_REG_BAND_24_FAT_CHANNELS,
			EEPROM_5000_REG_BAND_52_FAT_CHANNELS
		},
		.verify_signature = iwlcore_eeprom_verify_signature,
		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
		.release_semaphore = iwlcore_eeprom_release_semaphore,
		.check_version = iwl5000_eeprom_check_version,
		.query_addr = iwl5000_eeprom_query_addr,
	},
};

static struct iwl_ops iwl5000_ops = {
	.lib = &iwl5000_lib,
	.hcmd = &iwl5000_hcmd,
	.utils = &iwl5000_hcmd_utils,
};

static struct iwl_mod_params iwl50_mod_params = {
	.num_of_queues = IWL50_NUM_QUEUES,
	.enable_qos = 1,
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};


struct iwl_cfg iwl5300_agn_cfg = {
	.name = "5300AGN",
	.fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.mod_params = &iwl50_mod_params,
};

struct iwl_cfg iwl5100_agn_cfg = {
	.name = "5100AGN",
	.fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.mod_params = &iwl50_mod_params,
};

struct iwl_cfg iwl5350_agn_cfg = {
	.name = "5350AGN",
	.fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.mod_params = &iwl50_mod_params,
};

module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
MODULE_PARM_DESC(disable50,
		 "manually disable the 50XX radio (default 0 [radio on])");
module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444);
MODULE_PARM_DESC(swcrypto50,
		 "using software crypto engine (default 0 [hardware])\n");
module_param_named(debug50, iwl50_mod_params.debug, int, 0444);
MODULE_PARM_DESC(debug50, "50XX debug output mask");
module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444);
MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
module_param_named(qos_enable50, iwl50_mod_params.enable_qos, int, 0444);
MODULE_PARM_DESC(qos_enable50, "enable all 50XX QoS functionality");
module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, 0444);
MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");