/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
 * in 5 MHz steps; to keep this table static we only list the
 * channels for which we know we have calibration data on all
 * cards. */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * in 5 MHz steps; to keep this table static we only list the
 * channels for which we know we have calibration data on all
 * cards. */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Register read and write both share the same lock. We do this to
 * serialize reads and writes on Atheros 802.11n PCI devices only. This
 * is required as the FIFO on these devices can sanely accept only two
 * requests.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}

static const struct ath_ops ath9k_common_ops = {
        .read = ath9k_ioread32,
        .write = ath9k_iowrite32,
};

/**************************/
/*     Initialization     */
/**************************/

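/*
 * Advertise our HT capabilities to mac80211: 20/40 MHz operation, SM
 * power save, short GI and DSSS/CCK in 40 MHz, a 64k maximum A-MPDU,
 * and an MCS set derived from the TX/RX chainmasks (one or two spatial
 * streams).
 */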
static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 tx_streams, rx_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
                     1 : 2;
        rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
                     1 : 2;

        if (tx_streams != rx_streams) {
                ath_print(common, ATH_DBG_CONFIG,
                          "TX streams %d, RX streams: %d\n",
                          tx_streams, rx_streams);
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                           IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }

        ht_info->mcs.rx_mask[0] = 0xff;
        if (rx_streams >= 2)
                ht_info->mcs.rx_mask[1] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

        return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * Allocate both the DMA descriptor structure and the buffers it
 * contains; these hold the descriptors used by the system.
 */
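/*
 * Typical use (illustrative only; the real callers live in the TX/RX
 * setup paths, e.g. ath_tx_init()/ath_rx_init()):
 *
 *      error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
 *                                "tx", nbufs, 1);
 */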
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc)
{
#define DS2PHYS(_dd, _ds) \
        ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
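        /*
         * DS2PHYS() converts a descriptor's CPU address into its DMA address
         * using its offset from the start of the coherent block; the 4KB
         * macros flag descriptor addresses whose 32-dword fetch would cross
         * a 4KB boundary and estimate how many descriptors have to be
         * skipped (one per 4K page) for a block of the given length.
         */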
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_desc *ds;
        struct ath_buf *bf;
        int i, bsize, error;

        ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                  name, nbuf, ndesc);

        INIT_LIST_HEAD(head);
        /* ath_desc must be a multiple of DWORDs */
        if ((sizeof(struct ath_desc) % 4) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "ath_desc not DWORD aligned\n");
                BUG_ON((sizeof(struct ath_desc) % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * sizeof(struct ath_desc);
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = dd->dd_desc;
        ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                  name, ds, (u32) dd->dd_desc_len,
                  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += ndesc;
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

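/*
 * Size the hardware key cache, clear every entry (some parts do not
 * reset the key cache contents on power up), turn on h/w TKIP MIC where
 * supported, note whether split TKIP MIC keys are needed, and enable
 * multicast key search if the hardware can do it.
 */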
static void ath9k_init_crypto(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        /* Get the hardware key cache size. */
        common->keymax = sc->sc_ah->caps.keycache_size;
        if (common->keymax > ATH_KEYMAX) {
                ath_print(common, ATH_DBG_ANY,
                          "Warning, using only %u entries in %u key cache\n",
                          ATH_KEYMAX, common->keymax);
                common->keymax = ATH_KEYMAX;
        }

        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < common->keymax; i++)
                ath9k_hw_keyreset(sc->sc_ah, (u16) i);

        if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)) {
                /*
                 * Whether we should enable h/w TKIP MIC.
                 * XXX: if we don't support WME TKIP MIC, then we wouldn't
                 * report WMM capable, so it's always safe to turn on
                 * TKIP MIC in this case.
                 */
                ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
        }

        /*
         * Check whether the separate key cache entries
         * are required to handle both tx+rx MIC keys.
         * With split mic keys the number of stations is limited
         * to 27 otherwise 59.
         */
        if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)
            && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                      ATH9K_CIPHER_MIC, NULL)
            && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
                                      0, NULL))
                common->splitmic = 1;

        /* turn on mcast key search if possible */
        if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
                (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
                                             1, 1, NULL);
}

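/*
 * Set up Bluetooth coexistence according to the scheme the hardware
 * reports: nothing for ATH_BTCOEX_CFG_NONE, plain 2-wire init, or 3-wire
 * init plus the btcoex timer with a "stomp low" default tied to the BE
 * data queue.
 */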
static int ath9k_init_btcoex(struct ath_softc *sc)
{
        int r, qnum;

        switch (sc->sc_ah->btcoex_hw.scheme) {
        case ATH_BTCOEX_CFG_NONE:
                break;
        case ATH_BTCOEX_CFG_2WIRE:
                ath9k_hw_btcoex_init_2wire(sc->sc_ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_init_3wire(sc->sc_ah);
                r = ath_init_btcoex_timer(sc);
                if (r)
                        return -1;
                qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
                ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
        default:
                WARN_ON(1);
                break;
        }

        return 0;
}

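/*
 * Allocate the hardware transmit queues we need: the beacon queue, the
 * CAB (content-after-beacon) queue and one data queue per WMM access
 * category (BK/BE/VI/VO). On failure, any queue already set up is torn
 * down again.
 */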
static int ath9k_init_queues(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
                sc->tx.hwq_map[i] = -1;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        if (sc->beacon.beaconq == -1) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup a beacon xmit queue\n");
                goto err;
        }

        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
        if (sc->beacon.cabq == NULL) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup CAB xmit queue\n");
                goto err;
        }

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BK traffic\n");
                goto err;
        }

        if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BE traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VI traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VO traffic\n");
                goto err;
        }

        return 0;

err:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        return -EIO;
}

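/*
 * Attach the static channel and legacy rate tables to the per-band
 * structures. The 5 GHz band starts four entries into the rate table,
 * skipping the 2 GHz-only CCK rates (1, 2, 5.5 and 11 Mbps).
 */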
static void ath9k_init_channels_rates(struct ath_softc *sc)
{
        if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
                sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
                sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
}

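/*
 * Remaining one-off initialization: ANI timer, TX power limit, TX/RX
 * aggregation flags on HT-capable hardware, chainmasks, antenna
 * diversity, the broadcast BSSID mask and the beacon slot table.
 */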
static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->config.txpowlimit = ATH_TXPOWER_MAX;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                sc->sc_flags |= SC_OP_TXAGGR;
                sc->sc_flags |= SC_OP_RXAGGR;
        }

        common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
        common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

        ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
        sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
                memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
                sc->beacon.bslot[i] = NULL;
                sc->beacon.bslot_aphy[i] = NULL;
        }
}

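/*
 * Allocate the hardware abstraction (struct ath_hw) and initialize
 * everything that hangs off the softc: register ops, locks, tasklets,
 * the hardware itself, debugfs, TX queues, btcoex, the key cache and
 * the channel/rate tables. Everything set up so far is unwound on
 * failure.
 */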
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw_version.devid = devid;
        ah->hw_version.subsysid = subsysid;
        sc->sc_ah = ah;

        common = ath9k_hw_common(ah);
        common->ops = &ath9k_common_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;

        spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_resetlock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        ret = ath9k_init_debug(ah);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to create debugfs files\n");
                goto err_debug;
        }

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ath9k_init_crypto(sc);
        ath9k_init_channels_rates(sc);
        ath9k_init_misc(sc);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_exit_debug(ah);
err_debug:
        ath9k_hw_deinit(ah);
err_hw:
        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}

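/*
 * Describe the device to mac80211: hardware flags, supported interface
 * modes, queue/rate limits, the rate control algorithm, the per-band
 * channel, rate and HT capability structures, and the permanent MAC
 * address.
 */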
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                    IEEE80211_HW_SIGNAL_DBM |
                    IEEE80211_HW_SUPPORTS_PS |
                    IEEE80211_HW_PS_NULLFUNC_STACK |
                    IEEE80211_HW_SPECTRUM_MGMT |
                    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

        hw->rate_control_algorithm = "ath9k_rate_control";

        if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
                if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
        }

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

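/*
 * Top-level device bring-up: initialize the softc, publish hardware
 * capabilities, set up regulatory and TX/RX DMA, register with mac80211,
 * apply a world regulatory hint if needed, then start the LED and rfkill
 * machinery.
 */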
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
        INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
        sc->wiphy_scheduler_int = msecs_to_jiffies(500);

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

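/*
 * Undo ath9k_init_softc(): free the btcoex no-stomp timer, tear down the
 * TX queues, remove the debugfs entries, shut the hardware down and kill
 * the tasklets before freeing the ath_hw instance.
 */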
static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if ((sc->btcoex.no_stomp_timer) &&
            sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_exit_debug(sc->sc_ah);
        ath9k_hw_deinit(sc->sc_ah);

        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}

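/*
 * Full device teardown: stop rfkill polling and the LEDs, unregister and
 * free any secondary (virtual) wiphys, unregister from mac80211, release
 * RX/TX state and finally deinitialize the softc.
 */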
void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;
        int i = 0;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                sc->sec_wiphy[i] = NULL;
                ieee80211_unregister_hw(aphy->hw);
                ieee80211_free_hw(aphy->hw);
        }
        kfree(sc->sec_wiphy);

        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
}

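/*
 * Counterpart to ath_descdma_setup(): release the coherent descriptor
 * memory and the ath_buf array, then reset the descdma state.
 */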
void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

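/*
 * Registration order matters here: the rate control algorithm and the
 * debugfs root must exist before the PCI/AHB probe paths can bring up a
 * device; ath9k_exit() unwinds in the reverse order.
 */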
static int __init ath9k_init(void)
{
        int error;

        /* Register rate control algorithm */
        error = ath_rate_control_register();
        if (error != 0) {
                printk(KERN_ERR
                       "ath9k: Unable to register rate control "
                       "algorithm: %d\n",
                       error);
                goto err_out;
        }

        error = ath9k_debug_create_root();
        if (error) {
                printk(KERN_ERR
                       "ath9k: Unable to create debugfs root: %d\n",
                       error);
                goto err_rate_unregister;
        }

        error = ath_pci_init();
        if (error < 0) {
                printk(KERN_ERR
                       "ath9k: No PCI devices found, driver not installed.\n");
                error = -ENODEV;
                goto err_remove_root;
        }

        error = ath_ahb_init();
        if (error < 0) {
                error = -ENODEV;
                goto err_pci_exit;
        }

        return 0;

 err_pci_exit:
        ath_pci_exit();

 err_remove_root:
        ath9k_debug_remove_root();
 err_rate_unregister:
        ath_rate_control_unregister();
 err_out:
        return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
        ath_ahb_exit();
        ath_pci_exit();
        ath9k_debug_remove_root();
        ath_rate_control_unregister();
        printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);