Tomas Winkler | df48c32 | 2008-03-06 10:40:19 -0800 | [diff] [blame] | 1 | /****************************************************************************** |
| 2 | * |
Tomas Winkler | df48c32 | 2008-03-06 10:40:19 -0800 | [diff] [blame] | 3 | * GPL LICENSE SUMMARY |
| 4 | * |
| 5 | * Copyright(c) 2008 Intel Corporation. All rights reserved. |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of version 2 of the GNU General Public License as |
| 9 | * published by the Free Software Foundation. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, but |
| 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | * General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with this program; if not, write to the Free Software |
| 18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, |
| 19 | * USA |
| 20 | * |
| 21 | * The full GNU General Public License is included in this distribution |
| 22 | * in the file called LICENSE.GPL. |
| 23 | * |
| 24 | * Contact Information: |
| 25 | * Tomas Winkler <tomas.winkler@intel.com> |
| 26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
| 27 | *****************************************************************************/ |
| 28 | |
| 29 | #include <linux/kernel.h> |
| 30 | #include <linux/module.h> |
| 31 | #include <linux/version.h> |
Assaf Krauss | 1d0a082 | 2008-03-14 10:38:48 -0700 | [diff] [blame] | 32 | #include <net/mac80211.h> |
Tomas Winkler | df48c32 | 2008-03-06 10:40:19 -0800 | [diff] [blame] | 33 | |
Tomas Winkler | 712b6cf | 2008-03-12 16:58:52 -0700 | [diff] [blame] | 34 | struct iwl_priv; /* FIXME: remove */ |
Tomas Winkler | 0a6857e | 2008-03-12 16:58:49 -0700 | [diff] [blame] | 35 | #include "iwl-debug.h" |
Assaf Krauss | 6bc913b | 2008-03-11 16:17:18 -0700 | [diff] [blame] | 36 | #include "iwl-eeprom.h" |
Tomas Winkler | 3e0d4cb | 2008-04-24 11:55:38 -0700 | [diff] [blame] | 37 | #include "iwl-dev.h" /* FIXME: remove */ |
Tomas Winkler | df48c32 | 2008-03-06 10:40:19 -0800 | [diff] [blame] | 38 | #include "iwl-core.h" |
Tomas Winkler | b661c81 | 2008-04-23 17:14:54 -0700 | [diff] [blame] | 39 | #include "iwl-io.h" |
Mohamed Abbas | ad97edd | 2008-03-28 16:21:06 -0700 | [diff] [blame] | 40 | #include "iwl-rfkill.h" |
Mohamed Abbas | 5da4b55f | 2008-04-21 15:41:51 -0700 | [diff] [blame] | 41 | #include "iwl-power.h" |
Tomas Winkler | df48c32 | 2008-03-06 10:40:19 -0800 | [diff] [blame] | 42 | |
Assaf Krauss | 1d0a082 | 2008-03-14 10:38:48 -0700 | [diff] [blame] | 43 | |
MODULE_DESCRIPTION("iwl core");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");

#ifdef CONFIG_IWLWIFI_DEBUG
/* Runtime debug bitmask; exported so the other iwlwifi modules
 * (and their debug macros) can test it. Only built when
 * CONFIG_IWLWIFI_DEBUG is enabled. */
u32 iwl_debug_level;
EXPORT_SYMBOL(iwl_debug_level);
#endif
Assaf Krauss | 1d0a082 | 2008-03-14 10:38:48 -0700 | [diff] [blame] | 53 | |
/*
 * Expands to one entry of the iwl4965_rates[] table below. For a legacy
 * rate r (Mbps) it records the legacy PLCP code plus the SISO/MIMO2/MIMO3
 * PLCP codes of the HT rate s, the 802.11 rate value, and six index links
 * (prev/next in plain, "tgg", and HT order) used by rate scaling to walk
 * up or down the rate ladder.
 */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_MIMO3_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE, \
				    IWL_RATE_##ip##M_INDEX, \
				    IWL_RATE_##in##M_INDEX, \
				    IWL_RATE_##rp##M_INDEX, \
				    IWL_RATE_##rn##M_INDEX, \
				    IWL_RATE_##pp##M_INDEX, \
				    IWL_RATE_##np##M_INDEX }

/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used, which
 * maps to IWL_RATE_INVALID.
 */
const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /* 1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /* 2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /* 6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /* 9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
	/* FIXME:RS: ^^ should be INV (legacy) */
};
EXPORT_SYMBOL(iwl4965_rates);
| 92 | |
Assaf Krauss | 1d0a082 | 2008-03-14 10:38:48 -0700 | [diff] [blame] | 93 | /* This function both allocates and initializes hw and priv. */ |
| 94 | struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, |
| 95 | struct ieee80211_ops *hw_ops) |
| 96 | { |
| 97 | struct iwl_priv *priv; |
| 98 | |
| 99 | /* mac80211 allocates memory for this device instance, including |
| 100 | * space for this driver's private structure */ |
| 101 | struct ieee80211_hw *hw = |
| 102 | ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops); |
| 103 | if (hw == NULL) { |
| 104 | IWL_ERROR("Can not allocate network device\n"); |
| 105 | goto out; |
| 106 | } |
| 107 | |
| 108 | priv = hw->priv; |
| 109 | priv->hw = hw; |
| 110 | |
| 111 | out: |
| 112 | return hw; |
| 113 | } |
| 114 | EXPORT_SYMBOL(iwl_alloc_all); |
| 115 | |
/*
 * iwl_hw_detect - cache hardware revision identifiers in priv
 *
 * Reads the CSR hardware revision and the revision workaround register
 * directly (no NIC-access grab), and the PCI revision ID from config
 * space, storing all three in priv for later use.
 */
void iwl_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
	pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
}
EXPORT_SYMBOL(iwl_hw_detect);
| 123 | |
/* Tell nic where to find the "keep warm" buffer */
int iwl_kw_init(struct iwl_priv *priv)
{
	unsigned long flags;
	int ret;

	/* Register writes below require NIC access, taken under priv->lock */
	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;

	/* Device expects the DMA address in units of 16 bytes (>> 4) */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
			     priv->kw.dma_addr >> 4);
	iwl_release_nic_access(priv);
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return ret;
}
| 142 | |
| 143 | int iwl_kw_alloc(struct iwl_priv *priv) |
| 144 | { |
| 145 | struct pci_dev *dev = priv->pci_dev; |
Ron Rindjunsky | 1646690 | 2008-05-05 10:22:50 +0800 | [diff] [blame^] | 146 | struct iwl_kw *kw = &priv->kw; |
Ron Rindjunsky | 1053d35 | 2008-05-05 10:22:43 +0800 | [diff] [blame] | 147 | |
Ron Rindjunsky | 1646690 | 2008-05-05 10:22:50 +0800 | [diff] [blame^] | 148 | kw->size = IWL_KW_SIZE; |
Ron Rindjunsky | 1053d35 | 2008-05-05 10:22:43 +0800 | [diff] [blame] | 149 | kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr); |
| 150 | if (!kw->v_addr) |
| 151 | return -ENOMEM; |
| 152 | |
| 153 | return 0; |
| 154 | } |
| 155 | |
| 156 | /** |
| 157 | * iwl_kw_free - Free the "keep warm" buffer |
| 158 | */ |
| 159 | void iwl_kw_free(struct iwl_priv *priv) |
| 160 | { |
| 161 | struct pci_dev *dev = priv->pci_dev; |
Ron Rindjunsky | 1646690 | 2008-05-05 10:22:50 +0800 | [diff] [blame^] | 162 | struct iwl_kw *kw = &priv->kw; |
Ron Rindjunsky | 1053d35 | 2008-05-05 10:22:43 +0800 | [diff] [blame] | 163 | |
| 164 | if (kw->v_addr) { |
| 165 | pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr); |
| 166 | memset(kw, 0, sizeof(*kw)); |
| 167 | } |
| 168 | } |
| 169 | |
/*
 * iwl_hw_nic_init - bring the NIC to an initialized state
 *
 * Runs the hardware-specific APM init/config hooks, selects the VMAIN
 * power source, allocates (or resets) the Rx queue, replenishes its
 * buffers, and (re)builds all Tx/command queues.  Sets STATUS_INIT on
 * success.  Returns 0, or a negative errno from queue setup.
 */
int iwl_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Interrupt coalescing timer, in 32-usec units */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* NOTE(review): this return value is never checked and is
	 * overwritten below — confirm whether a set_pwr_src failure
	 * should abort init */
	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERROR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl_rx_queue_reset(priv, rxq);

	iwl_rx_replenish(priv);

	iwl_rx_init(priv, rxq);

	/* Force a write-pointer update now that the queue has buffers */
	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	ret = iwl_txq_ctx_reset(priv);
	if (ret)
		return ret;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwl_hw_nic_init);
| 218 | |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 219 | /** |
| 220 | * iwlcore_clear_stations_table - Clear the driver's station table |
| 221 | * |
| 222 | * NOTE: This does not clear or otherwise alter the device's station table. |
| 223 | */ |
| 224 | void iwlcore_clear_stations_table(struct iwl_priv *priv) |
| 225 | { |
| 226 | unsigned long flags; |
| 227 | |
| 228 | spin_lock_irqsave(&priv->sta_lock, flags); |
| 229 | |
| 230 | priv->num_stations = 0; |
| 231 | memset(priv->stations, 0, sizeof(priv->stations)); |
| 232 | |
| 233 | spin_unlock_irqrestore(&priv->sta_lock, flags); |
| 234 | } |
| 235 | EXPORT_SYMBOL(iwlcore_clear_stations_table); |
| 236 | |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 237 | void iwl_reset_qos(struct iwl_priv *priv) |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 238 | { |
| 239 | u16 cw_min = 15; |
| 240 | u16 cw_max = 1023; |
| 241 | u8 aifs = 2; |
| 242 | u8 is_legacy = 0; |
| 243 | unsigned long flags; |
| 244 | int i; |
| 245 | |
| 246 | spin_lock_irqsave(&priv->lock, flags); |
| 247 | priv->qos_data.qos_active = 0; |
| 248 | |
| 249 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { |
| 250 | if (priv->qos_data.qos_enable) |
| 251 | priv->qos_data.qos_active = 1; |
| 252 | if (!(priv->active_rate & 0xfff0)) { |
| 253 | cw_min = 31; |
| 254 | is_legacy = 1; |
| 255 | } |
| 256 | } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { |
| 257 | if (priv->qos_data.qos_enable) |
| 258 | priv->qos_data.qos_active = 1; |
| 259 | } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { |
| 260 | cw_min = 31; |
| 261 | is_legacy = 1; |
| 262 | } |
| 263 | |
| 264 | if (priv->qos_data.qos_active) |
| 265 | aifs = 3; |
| 266 | |
| 267 | priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); |
| 268 | priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); |
| 269 | priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; |
| 270 | priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; |
| 271 | priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; |
| 272 | |
| 273 | if (priv->qos_data.qos_active) { |
| 274 | i = 1; |
| 275 | priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); |
| 276 | priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); |
| 277 | priv->qos_data.def_qos_parm.ac[i].aifsn = 7; |
| 278 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; |
| 279 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; |
| 280 | |
| 281 | i = 2; |
| 282 | priv->qos_data.def_qos_parm.ac[i].cw_min = |
| 283 | cpu_to_le16((cw_min + 1) / 2 - 1); |
| 284 | priv->qos_data.def_qos_parm.ac[i].cw_max = |
| 285 | cpu_to_le16(cw_max); |
| 286 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; |
| 287 | if (is_legacy) |
| 288 | priv->qos_data.def_qos_parm.ac[i].edca_txop = |
| 289 | cpu_to_le16(6016); |
| 290 | else |
| 291 | priv->qos_data.def_qos_parm.ac[i].edca_txop = |
| 292 | cpu_to_le16(3008); |
| 293 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; |
| 294 | |
| 295 | i = 3; |
| 296 | priv->qos_data.def_qos_parm.ac[i].cw_min = |
| 297 | cpu_to_le16((cw_min + 1) / 4 - 1); |
| 298 | priv->qos_data.def_qos_parm.ac[i].cw_max = |
| 299 | cpu_to_le16((cw_max + 1) / 2 - 1); |
| 300 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; |
| 301 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; |
| 302 | if (is_legacy) |
| 303 | priv->qos_data.def_qos_parm.ac[i].edca_txop = |
| 304 | cpu_to_le16(3264); |
| 305 | else |
| 306 | priv->qos_data.def_qos_parm.ac[i].edca_txop = |
| 307 | cpu_to_le16(1504); |
| 308 | } else { |
| 309 | for (i = 1; i < 4; i++) { |
| 310 | priv->qos_data.def_qos_parm.ac[i].cw_min = |
| 311 | cpu_to_le16(cw_min); |
| 312 | priv->qos_data.def_qos_parm.ac[i].cw_max = |
| 313 | cpu_to_le16(cw_max); |
| 314 | priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; |
| 315 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; |
| 316 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; |
| 317 | } |
| 318 | } |
| 319 | IWL_DEBUG_QOS("set QoS to default \n"); |
| 320 | |
| 321 | spin_unlock_irqrestore(&priv->lock, flags); |
| 322 | } |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 323 | EXPORT_SYMBOL(iwl_reset_qos); |
| 324 | |
#ifdef CONFIG_IWL4965_HT
/*
 * iwlcore_init_ht_hw_capab - fill mac80211 HT capability info for one band
 *
 * Advertises 40 MHz / SGI-40 and the 40-MHz MCS bit (supp_mcs_set[4])
 * only when the hardware reports FAT-channel support for this band.
 * Rx MCS bytes 0-2 are enabled to match the number of Tx chains.
 */
static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
			     struct ieee80211_ht_info *ht_info,
			     enum ieee80211_band band)
{
	ht_info->cap = 0;
	memset(ht_info->supp_mcs_set, 0, 16);

	ht_info->ht_supported = 1;

	if (priv->hw_params.fat_channel & BIT(band)) {
		ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
		ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
		ht_info->supp_mcs_set[4] = 0x01;
	}
	ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
	/* MIMO power-save field set to "none" (no MIMO PS restriction) */
	ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
			     (IWL_MIMO_PS_NONE << 2));

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* One MCS byte (8 rates) per available Tx chain */
	ht_info->supp_mcs_set[0] = 0xFF;
	if (priv->hw_params.tx_chains_num >= 2)
		ht_info->supp_mcs_set[1] = 0xFF;
	if (priv->hw_params.tx_chains_num >= 3)
		ht_info->supp_mcs_set[2] = 0xFF;
}
#else
/* Stub when 4965 HT support is compiled out: advertise nothing */
static inline void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
			     struct ieee80211_ht_info *ht_info,
			     enum ieee80211_band band)
{
}
#endif /* CONFIG_IWL4965_HT */
| 364 | |
| 365 | static void iwlcore_init_hw_rates(struct iwl_priv *priv, |
| 366 | struct ieee80211_rate *rates) |
| 367 | { |
| 368 | int i; |
| 369 | |
| 370 | for (i = 0; i < IWL_RATE_COUNT; i++) { |
| 371 | rates[i].bitrate = iwl4965_rates[i].ieee * 5; |
| 372 | rates[i].hw_value = i; /* Rate scaling will work on indexes */ |
| 373 | rates[i].hw_value_short = i; |
| 374 | rates[i].flags = 0; |
| 375 | if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { |
| 376 | /* |
| 377 | * If CCK != 1M then set short preamble rate flag. |
| 378 | */ |
| 379 | rates[i].flags |= |
| 380 | (iwl4965_rates[i].plcp == IWL_RATE_1M_PLCP) ? |
| 381 | 0 : IEEE80211_RATE_SHORT_PREAMBLE; |
| 382 | } |
| 383 | } |
| 384 | } |
| 385 | |
/**
 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
 *
 * Allocates the ieee80211_channel and ieee80211_rate arrays, wires them
 * into priv->bands for 2.4 and 5.2 GHz, and copies per-channel flags and
 * power limits from the EEPROM-derived priv->channel_info.  Idempotent:
 * returns early if the bands were already populated.  Returns 0 or
 * -ENOMEM.  Ownership: the arrays are freed by iwlcore_free_geos().
 */
static int iwlcore_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO("Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;

	iwlcore_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT;

	iwlcore_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	iwlcore_init_hw_rates(priv, rates);

	/* Copy per-channel EEPROM data into the mac80211 structures */
	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		/* FIXME: might be removed if scan is OK */
		if (!is_channel_valid(ch))
			continue;

		if (is_channel_a_band(ch))
			sband = &priv->bands[IEEE80211_BAND_5GHZ];
		else
			sband = &priv->bands[IEEE80211_BAND_2GHZ];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
				ieee80211_channel_to_frequency(ch->channel);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		/* NOTE(review): always true here because of the
		 * is_channel_valid() 'continue' above; the else branch is
		 * unreachable until that FIXME filter is removed */
		if (is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			if (ch->max_power_avg > priv->max_channel_txpower_limit)
				priv->max_channel_txpower_limit =
				    ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		/* Save flags for reg domain usage */
		geo_ch->orig_flags = geo_ch->flags;

		IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
				ch->channel, geo_ch->center_freq,
				is_channel_a_band(ch) ?  "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				 geo_ch->flags);
	}

	/* SKU said 'A' but no 5 GHz channels were found: downgrade it */
	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		printk(KERN_INFO DRV_NAME
		       ": Incorrectly detected BG card as ABG. Please send "
		       "your PCI ID 0x%04X:0x%04X to maintainer.\n",
		       priv->pci_dev->device, priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	printk(KERN_INFO DRV_NAME
	       ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
	       priv->bands[IEEE80211_BAND_2GHZ].n_channels,
	       priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
| 512 | |
| 513 | /* |
| 514 | * iwlcore_free_geos - undo allocations in iwlcore_init_geos |
| 515 | */ |
| 516 | void iwlcore_free_geos(struct iwl_priv *priv) |
| 517 | { |
| 518 | kfree(priv->ieee_channels); |
| 519 | kfree(priv->ieee_rates); |
| 520 | clear_bit(STATUS_GEO_CONFIGURED, &priv->status); |
| 521 | } |
| 522 | EXPORT_SYMBOL(iwlcore_free_geos); |
| 523 | |
| 524 | #ifdef CONFIG_IWL4965_HT |
| 525 | static u8 is_single_rx_stream(struct iwl_priv *priv) |
| 526 | { |
| 527 | return !priv->current_ht_config.is_ht || |
| 528 | ((priv->current_ht_config.supp_mcs_set[1] == 0) && |
| 529 | (priv->current_ht_config.supp_mcs_set[2] == 0)) || |
| 530 | priv->ps_mode == IWL_MIMO_PS_STATIC; |
| 531 | } |
Tomas Winkler | 47c5196 | 2008-05-05 10:22:41 +0800 | [diff] [blame] | 532 | static u8 iwl_is_channel_extension(struct iwl_priv *priv, |
| 533 | enum ieee80211_band band, |
| 534 | u16 channel, u8 extension_chan_offset) |
| 535 | { |
| 536 | const struct iwl_channel_info *ch_info; |
| 537 | |
| 538 | ch_info = iwl_get_channel_info(priv, band, channel); |
| 539 | if (!is_channel_valid(ch_info)) |
| 540 | return 0; |
| 541 | |
| 542 | if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE) |
| 543 | return 0; |
| 544 | |
| 545 | if ((ch_info->fat_extension_channel == extension_chan_offset) || |
| 546 | (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX)) |
| 547 | return 1; |
| 548 | |
| 549 | return 0; |
| 550 | } |
| 551 | |
| 552 | u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, |
| 553 | struct ieee80211_ht_info *sta_ht_inf) |
| 554 | { |
| 555 | struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config; |
| 556 | |
| 557 | if ((!iwl_ht_conf->is_ht) || |
| 558 | (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) || |
| 559 | (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)) |
| 560 | return 0; |
| 561 | |
| 562 | if (sta_ht_inf) { |
| 563 | if ((!sta_ht_inf->ht_supported) || |
| 564 | (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH))) |
| 565 | return 0; |
| 566 | } |
| 567 | |
| 568 | return iwl_is_channel_extension(priv, priv->band, |
| 569 | iwl_ht_conf->control_channel, |
| 570 | iwl_ht_conf->extension_chan_offset); |
| 571 | } |
| 572 | EXPORT_SYMBOL(iwl_is_fat_tx_allowed); |
| 573 | |
/*
 * iwl_set_rxon_ht - fold the current HT configuration into staging RXON
 *
 * Sets the channel-mode and control-channel-location flags, the HT
 * protection mode, and refreshes the Rx chain selection.  No-op when HT
 * is disabled.  If the staged RXON channel disagrees with the HT control
 * channel, only the channel is corrected and the function returns early.
 */
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
{
	struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
	u32 val;

	if (!ht_info->is_ht)
		return;

	/* Set up channel bandwidth:  20 MHz only, or 20/40 mixed if fat ok */
	if (iwl_is_fat_tx_allowed(priv, NULL))
		rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
	else
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
				 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);

	if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
		IWL_DEBUG_ASSOC("control diff than current %d %d\n",
				le16_to_cpu(rxon->channel),
				ht_info->control_channel);
		rxon->channel = cpu_to_le16(ht_info->control_channel);
		return;
	}

	/* Note: control channel is opposite of extension channel */
	switch (ht_info->extension_chan_offset) {
	case IWL_EXT_CHANNEL_OFFSET_ABOVE:
		rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
		break;
	case IWL_EXT_CHANNEL_OFFSET_BELOW:
		rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
		break;
	case IWL_EXT_CHANNEL_OFFSET_NONE:
	default:
		/* No extension channel: drop back to pure 20 MHz */
		rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
		break;
	}

	val = ht_info->ht_protection;

	rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);

	iwl_set_rxon_chain(priv);

	IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X "
			"rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x "
			"control chan %d\n",
			ht_info->supp_mcs_set[0],
			ht_info->supp_mcs_set[1],
			ht_info->supp_mcs_set[2],
			le32_to_cpu(rxon->flags), ht_info->ht_protection,
			ht_info->extension_chan_offset,
			ht_info->control_channel);
	return;
}
EXPORT_SYMBOL(iwl_set_rxon_ht);
| 630 | |
#else
/* Without 4965 HT support a single Rx stream is always sufficient */
static inline u8 is_single_rx_stream(struct iwl_priv *priv)
{
	return 1;
}
#endif	/*CONFIG_IWL4965_HT */
| 637 | |
| 638 | /* |
| 639 | * Determine how many receiver/antenna chains to use. |
| 640 | * More provides better reception via diversity. Fewer saves power. |
| 641 | * MIMO (dual stream) requires at least 2, but works better with 3. |
| 642 | * This does not determine *which* chains to use, just how many. |
| 643 | */ |
| 644 | static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv, |
| 645 | u8 *idle_state, u8 *rx_state) |
| 646 | { |
| 647 | u8 is_single = is_single_rx_stream(priv); |
| 648 | u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1; |
| 649 | |
| 650 | /* # of Rx chains to use when expecting MIMO. */ |
| 651 | if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC))) |
| 652 | *rx_state = 2; |
| 653 | else |
| 654 | *rx_state = 3; |
| 655 | |
| 656 | /* # Rx chains when idling and maybe trying to save power */ |
| 657 | switch (priv->ps_mode) { |
| 658 | case IWL_MIMO_PS_STATIC: |
| 659 | case IWL_MIMO_PS_DYNAMIC: |
| 660 | *idle_state = (is_cam) ? 2 : 1; |
| 661 | break; |
| 662 | case IWL_MIMO_PS_NONE: |
| 663 | *idle_state = (is_cam) ? *rx_state : 1; |
| 664 | break; |
| 665 | default: |
| 666 | *idle_state = 1; |
| 667 | break; |
| 668 | } |
| 669 | |
| 670 | return 0; |
| 671 | } |
| 672 | |
| 673 | /** |
| 674 | * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image |
| 675 | * |
| 676 | * Selects how many and which Rx receivers/antennas/chains to use. |
| 677 | * This should not be used for scan command ... it puts data in wrong place. |
| 678 | */ |
| 679 | void iwl_set_rxon_chain(struct iwl_priv *priv) |
| 680 | { |
| 681 | u8 is_single = is_single_rx_stream(priv); |
| 682 | u8 idle_state, rx_state; |
| 683 | |
| 684 | priv->staging_rxon.rx_chain = 0; |
| 685 | rx_state = idle_state = 3; |
| 686 | |
| 687 | /* Tell uCode which antennas are actually connected. |
| 688 | * Before first association, we assume all antennas are connected. |
| 689 | * Just after first association, iwl_chain_noise_calibration() |
| 690 | * checks which antennas actually *are* connected. */ |
| 691 | priv->staging_rxon.rx_chain |= |
| 692 | cpu_to_le16(priv->hw_params.valid_rx_ant << |
| 693 | RXON_RX_CHAIN_VALID_POS); |
| 694 | |
| 695 | /* How many receivers should we use? */ |
| 696 | iwlcore_get_rx_chain_counter(priv, &idle_state, &rx_state); |
| 697 | priv->staging_rxon.rx_chain |= |
| 698 | cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS); |
| 699 | priv->staging_rxon.rx_chain |= |
| 700 | cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS); |
| 701 | |
| 702 | if (!is_single && (rx_state >= 2) && |
| 703 | !test_bit(STATUS_POWER_PMI, &priv->status)) |
| 704 | priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; |
| 705 | else |
| 706 | priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; |
| 707 | |
| 708 | IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain); |
| 709 | } |
| 710 | EXPORT_SYMBOL(iwl_set_rxon_chain); |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 711 | |
| 712 | /** |
| 713 | * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON |
| 714 | * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz |
| 715 | * @channel: Any channel valid for the requested phymode |
| 716 | |
| 717 | * In addition to setting the staging RXON, priv->phymode is also set. |
| 718 | * |
| 719 | * NOTE: Does not commit to the hardware; it sets appropriate bit fields |
| 720 | * in the staging RXON flag structure based on the phymode |
| 721 | */ |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 722 | int iwl_set_rxon_channel(struct iwl_priv *priv, |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 723 | enum ieee80211_band band, |
| 724 | u16 channel) |
| 725 | { |
Assaf Krauss | 8622e70 | 2008-03-21 13:53:43 -0700 | [diff] [blame] | 726 | if (!iwl_get_channel_info(priv, band, channel)) { |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 727 | IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", |
| 728 | channel, band); |
| 729 | return -EINVAL; |
| 730 | } |
| 731 | |
| 732 | if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && |
| 733 | (priv->band == band)) |
| 734 | return 0; |
| 735 | |
| 736 | priv->staging_rxon.channel = cpu_to_le16(channel); |
| 737 | if (band == IEEE80211_BAND_5GHZ) |
| 738 | priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; |
| 739 | else |
| 740 | priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; |
| 741 | |
| 742 | priv->band = band; |
| 743 | |
| 744 | IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band); |
| 745 | |
| 746 | return 0; |
| 747 | } |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 748 | EXPORT_SYMBOL(iwl_set_rxon_channel); |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 749 | |
| 750 | static void iwlcore_init_hw(struct iwl_priv *priv) |
| 751 | { |
| 752 | struct ieee80211_hw *hw = priv->hw; |
| 753 | hw->rate_control_algorithm = "iwl-4965-rs"; |
| 754 | |
| 755 | /* Tell mac80211 and its clients (e.g. Wireless Extensions) |
| 756 | * the range of signal quality values that we'll provide. |
| 757 | * Negative values for level/noise indicate that we'll provide dBm. |
| 758 | * For WE, at least, non-0 values here *enable* display of values |
| 759 | * in app (iwconfig). */ |
| 760 | hw->max_rssi = -20; /* signal level, negative indicates dBm */ |
| 761 | hw->max_noise = -20; /* noise level, negative indicates dBm */ |
| 762 | hw->max_signal = 100; /* link quality indication (%) */ |
| 763 | |
| 764 | /* Tell mac80211 our Tx characteristics */ |
| 765 | hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; |
| 766 | |
| 767 | /* Default value; 4 EDCA QOS priorities */ |
| 768 | hw->queues = 4; |
| 769 | #ifdef CONFIG_IWL4965_HT |
| 770 | /* Enhanced value; more queues, to support 11n aggregation */ |
Johannes Berg | e100bb6 | 2008-04-30 18:51:21 +0200 | [diff] [blame] | 771 | hw->ampdu_queues = 12; |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 772 | #endif /* CONFIG_IWL4965_HT */ |
| 773 | } |
| 774 | |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 775 | static int iwlcore_init_drv(struct iwl_priv *priv) |
| 776 | { |
| 777 | int ret; |
| 778 | int i; |
| 779 | |
| 780 | priv->retry_rate = 1; |
| 781 | priv->ibss_beacon = NULL; |
| 782 | |
| 783 | spin_lock_init(&priv->lock); |
| 784 | spin_lock_init(&priv->power_data.lock); |
| 785 | spin_lock_init(&priv->sta_lock); |
| 786 | spin_lock_init(&priv->hcmd_lock); |
| 787 | spin_lock_init(&priv->lq_mngr.lock); |
| 788 | |
| 789 | for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) |
| 790 | INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); |
| 791 | |
| 792 | INIT_LIST_HEAD(&priv->free_frames); |
| 793 | |
| 794 | mutex_init(&priv->mutex); |
| 795 | |
| 796 | /* Clear the driver's (not device's) station table */ |
| 797 | iwlcore_clear_stations_table(priv); |
| 798 | |
| 799 | priv->data_retry_limit = -1; |
| 800 | priv->ieee_channels = NULL; |
| 801 | priv->ieee_rates = NULL; |
| 802 | priv->band = IEEE80211_BAND_2GHZ; |
| 803 | |
| 804 | priv->iw_mode = IEEE80211_IF_TYPE_STA; |
| 805 | |
| 806 | priv->use_ant_b_for_management_frame = 1; /* start with ant B */ |
| 807 | priv->ps_mode = IWL_MIMO_PS_NONE; |
| 808 | |
| 809 | /* Choose which receivers/antennas to use */ |
| 810 | iwl_set_rxon_chain(priv); |
| 811 | |
| 812 | iwl_reset_qos(priv); |
| 813 | |
| 814 | priv->qos_data.qos_active = 0; |
| 815 | priv->qos_data.qos_cap.val = 0; |
| 816 | |
| 817 | iwl_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6); |
| 818 | |
| 819 | priv->rates_mask = IWL_RATES_MASK; |
| 820 | /* If power management is turned on, default to AC mode */ |
| 821 | priv->power_mode = IWL_POWER_AC; |
| 822 | priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; |
| 823 | |
| 824 | ret = iwl_init_channel_map(priv); |
| 825 | if (ret) { |
| 826 | IWL_ERROR("initializing regulatory failed: %d\n", ret); |
| 827 | goto err; |
| 828 | } |
| 829 | |
| 830 | ret = iwlcore_init_geos(priv); |
| 831 | if (ret) { |
| 832 | IWL_ERROR("initializing geos failed: %d\n", ret); |
| 833 | goto err_free_channel_map; |
| 834 | } |
| 835 | |
| 836 | ret = ieee80211_register_hw(priv->hw); |
| 837 | if (ret) { |
| 838 | IWL_ERROR("Failed to register network device (error %d)\n", |
| 839 | ret); |
| 840 | goto err_free_geos; |
| 841 | } |
| 842 | |
| 843 | priv->hw->conf.beacon_int = 100; |
| 844 | priv->mac80211_registered = 1; |
| 845 | |
| 846 | return 0; |
| 847 | |
| 848 | err_free_geos: |
| 849 | iwlcore_free_geos(priv); |
| 850 | err_free_channel_map: |
| 851 | iwl_free_channel_map(priv); |
| 852 | err: |
| 853 | return ret; |
| 854 | } |
| 855 | |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 856 | int iwl_setup(struct iwl_priv *priv) |
| 857 | { |
| 858 | int ret = 0; |
| 859 | iwlcore_init_hw(priv); |
Ron Rindjunsky | c7de35c | 2008-04-23 17:15:05 -0700 | [diff] [blame] | 860 | ret = iwlcore_init_drv(priv); |
Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 861 | return ret; |
| 862 | } |
| 863 | EXPORT_SYMBOL(iwl_setup); |
| 864 | |
Mohamed Abbas | c8381fd | 2008-03-28 16:21:05 -0700 | [diff] [blame] | 865 | /* Low level driver call this function to update iwlcore with |
| 866 | * driver status. |
| 867 | */ |
| 868 | int iwlcore_low_level_notify(struct iwl_priv *priv, |
| 869 | enum iwlcore_card_notify notify) |
| 870 | { |
Mohamed Abbas | 03d29c6 | 2008-04-03 16:05:24 -0700 | [diff] [blame] | 871 | int ret; |
Mohamed Abbas | c8381fd | 2008-03-28 16:21:05 -0700 | [diff] [blame] | 872 | switch (notify) { |
| 873 | case IWLCORE_INIT_EVT: |
Mohamed Abbas | 03d29c6 | 2008-04-03 16:05:24 -0700 | [diff] [blame] | 874 | ret = iwl_rfkill_init(priv); |
| 875 | if (ret) |
| 876 | IWL_ERROR("Unable to initialize RFKILL system. " |
| 877 | "Ignoring error: %d\n", ret); |
Mohamed Abbas | 5da4b55f | 2008-04-21 15:41:51 -0700 | [diff] [blame] | 878 | iwl_power_initialize(priv); |
Mohamed Abbas | c8381fd | 2008-03-28 16:21:05 -0700 | [diff] [blame] | 879 | break; |
| 880 | case IWLCORE_START_EVT: |
Mohamed Abbas | 5da4b55f | 2008-04-21 15:41:51 -0700 | [diff] [blame] | 881 | iwl_power_update_mode(priv, 1); |
Mohamed Abbas | c8381fd | 2008-03-28 16:21:05 -0700 | [diff] [blame] | 882 | break; |
| 883 | case IWLCORE_STOP_EVT: |
| 884 | break; |
| 885 | case IWLCORE_REMOVE_EVT: |
Mohamed Abbas | ad97edd | 2008-03-28 16:21:06 -0700 | [diff] [blame] | 886 | iwl_rfkill_unregister(priv); |
Mohamed Abbas | c8381fd | 2008-03-28 16:21:05 -0700 | [diff] [blame] | 887 | break; |
| 888 | } |
| 889 | |
| 890 | return 0; |
| 891 | } |
| 892 | EXPORT_SYMBOL(iwlcore_low_level_notify); |
| 893 | |
Emmanuel Grumbach | 49ea859 | 2008-04-15 16:01:37 -0700 | [diff] [blame] | 894 | int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags) |
| 895 | { |
| 896 | u32 stat_flags = 0; |
| 897 | struct iwl_host_cmd cmd = { |
| 898 | .id = REPLY_STATISTICS_CMD, |
| 899 | .meta.flags = flags, |
| 900 | .len = sizeof(stat_flags), |
| 901 | .data = (u8 *) &stat_flags, |
| 902 | }; |
| 903 | return iwl_send_cmd(priv, &cmd); |
| 904 | } |
| 905 | EXPORT_SYMBOL(iwl_send_statistics_request); |
Tomas Winkler | 7e8c519 | 2008-04-15 16:01:43 -0700 | [diff] [blame] | 906 | |
Emmanuel Grumbach | b0692f2 | 2008-04-24 11:55:18 -0700 | [diff] [blame] | 907 | /** |
| 908 | * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, |
| 909 | * using sample data 100 bytes apart. If these sample points are good, |
| 910 | * it's a pretty good bet that everything between them is good, too. |
| 911 | */ |
| 912 | static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) |
| 913 | { |
| 914 | u32 val; |
| 915 | int ret = 0; |
| 916 | u32 errcnt = 0; |
| 917 | u32 i; |
| 918 | |
| 919 | IWL_DEBUG_INFO("ucode inst image size is %u\n", len); |
| 920 | |
| 921 | ret = iwl_grab_nic_access(priv); |
| 922 | if (ret) |
| 923 | return ret; |
| 924 | |
| 925 | for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { |
| 926 | /* read data comes through single port, auto-incr addr */ |
| 927 | /* NOTE: Use the debugless read so we don't flood kernel log |
| 928 | * if IWL_DL_IO is set */ |
| 929 | iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, |
| 930 | i + RTC_INST_LOWER_BOUND); |
| 931 | val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); |
| 932 | if (val != le32_to_cpu(*image)) { |
| 933 | ret = -EIO; |
| 934 | errcnt++; |
| 935 | if (errcnt >= 3) |
| 936 | break; |
| 937 | } |
| 938 | } |
| 939 | |
| 940 | iwl_release_nic_access(priv); |
| 941 | |
| 942 | return ret; |
| 943 | } |
| 944 | |
| 945 | /** |
| 946 | * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host, |
| 947 | * looking at all data. |
| 948 | */ |
| 949 | static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image, |
| 950 | u32 len) |
| 951 | { |
| 952 | u32 val; |
| 953 | u32 save_len = len; |
| 954 | int ret = 0; |
| 955 | u32 errcnt; |
| 956 | |
| 957 | IWL_DEBUG_INFO("ucode inst image size is %u\n", len); |
| 958 | |
| 959 | ret = iwl_grab_nic_access(priv); |
| 960 | if (ret) |
| 961 | return ret; |
| 962 | |
| 963 | iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); |
| 964 | |
| 965 | errcnt = 0; |
| 966 | for (; len > 0; len -= sizeof(u32), image++) { |
| 967 | /* read data comes through single port, auto-incr addr */ |
| 968 | /* NOTE: Use the debugless read so we don't flood kernel log |
| 969 | * if IWL_DL_IO is set */ |
| 970 | val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); |
| 971 | if (val != le32_to_cpu(*image)) { |
| 972 | IWL_ERROR("uCode INST section is invalid at " |
| 973 | "offset 0x%x, is 0x%x, s/b 0x%x\n", |
| 974 | save_len - len, val, le32_to_cpu(*image)); |
| 975 | ret = -EIO; |
| 976 | errcnt++; |
| 977 | if (errcnt >= 20) |
| 978 | break; |
| 979 | } |
| 980 | } |
| 981 | |
| 982 | iwl_release_nic_access(priv); |
| 983 | |
| 984 | if (!errcnt) |
| 985 | IWL_DEBUG_INFO |
| 986 | ("ucode image in INSTRUCTION memory is good\n"); |
| 987 | |
| 988 | return ret; |
| 989 | } |
| 990 | |
| 991 | /** |
| 992 | * iwl_verify_ucode - determine which instruction image is in SRAM, |
| 993 | * and verify its contents |
| 994 | */ |
| 995 | int iwl_verify_ucode(struct iwl_priv *priv) |
| 996 | { |
| 997 | __le32 *image; |
| 998 | u32 len; |
| 999 | int ret; |
| 1000 | |
| 1001 | /* Try bootstrap */ |
| 1002 | image = (__le32 *)priv->ucode_boot.v_addr; |
| 1003 | len = priv->ucode_boot.len; |
| 1004 | ret = iwlcore_verify_inst_sparse(priv, image, len); |
| 1005 | if (!ret) { |
| 1006 | IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); |
| 1007 | return 0; |
| 1008 | } |
| 1009 | |
| 1010 | /* Try initialize */ |
| 1011 | image = (__le32 *)priv->ucode_init.v_addr; |
| 1012 | len = priv->ucode_init.len; |
| 1013 | ret = iwlcore_verify_inst_sparse(priv, image, len); |
| 1014 | if (!ret) { |
| 1015 | IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); |
| 1016 | return 0; |
| 1017 | } |
| 1018 | |
| 1019 | /* Try runtime/protocol */ |
| 1020 | image = (__le32 *)priv->ucode_code.v_addr; |
| 1021 | len = priv->ucode_code.len; |
| 1022 | ret = iwlcore_verify_inst_sparse(priv, image, len); |
| 1023 | if (!ret) { |
| 1024 | IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); |
| 1025 | return 0; |
| 1026 | } |
| 1027 | |
| 1028 | IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); |
| 1029 | |
| 1030 | /* Since nothing seems to match, show first several data entries in |
| 1031 | * instruction SRAM, so maybe visual inspection will give a clue. |
| 1032 | * Selection of bootstrap image (vs. other images) is arbitrary. */ |
| 1033 | image = (__le32 *)priv->ucode_boot.v_addr; |
| 1034 | len = priv->ucode_boot.len; |
| 1035 | ret = iwl_verify_inst_full(priv, image, len); |
| 1036 | |
| 1037 | return ret; |
| 1038 | } |
| 1039 | EXPORT_SYMBOL(iwl_verify_ucode); |
| 1040 | |