/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
 * in 5 MHz steps; we only support the channels we know we
 * have calibration data for on all cards, so that this
 * table can stay static */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * in 5 MHz steps; we only support the channels we know we
 * have calibration data for on all cards, so that this
 * table can stay static */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * register accesses on Atheros 802.11n PCI devices only. This is
 * required as the FIFO on these devices can only sanely accept two
 * requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};

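/*
 * Count the number of active chains in a chainmask (one stream per set
 * bit), capped at max.
 */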
static int count_streams(unsigned int chainmask, int max)
{
	int streams = 0;

	do {
		if (++streams == max)
			break;
	} while ((chainmask = chainmask & (chainmask - 1)));

	return streams;
}

/**************************/
/*     Initialization     */
/**************************/

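/*
 * Advertise HT capabilities to mac80211: base capability flags, A-MPDU
 * parameters, and an MCS set derived from the TX/RX chainmasks.
 */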
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_10_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = count_streams(common->tx_chainmask, max_streams);
	rx_streams = count_streams(common->rx_chainmask, max_streams);

	ath_print(common, ATH_DBG_CONFIG,
		  "TX streams %d, RX streams: %d\n",
		  tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

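/*
 * Regulatory notifier called by cfg80211; hand the request over to the
 * ath regulatory helpers.
 */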
static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains. These hold the descriptors used by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

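/*
 * Set up the hardware key cache: reset all entries and probe cipher
 * capabilities (TKIP MIC, split MIC keys, multicast key search).
 */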
static void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath9k_hw_keyreset(sc->sc_ah, (u16) i);

	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
	}

	/*
	 * Check whether separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27, otherwise 59.
	 */
	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		common->splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
					     1, 1, NULL);
}

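/*
 * Initialize Bluetooth coexistence according to the scheme (none, 2-wire
 * or 3-wire) supported by the hardware.
 */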
static int ath9k_init_btcoex(struct ath_softc *sc)
{
	int r, qnum;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

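/* Set up the beacon, CAB and per-AC (BK/BE/VI/VO) transmit queues. */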
static int ath9k_init_queues(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	if (sc->beacon.beaconq == -1) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup a beacon xmit queue\n");
		goto err;
	}

	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup CAB xmit queue\n");
		goto err;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BK traffic\n");
		goto err;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BE traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VI traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VO traffic\n");
		goto err;
	}

	return 0;

err:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	return -EIO;
}

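/*
 * Attach the static channel and legacy rate tables to the 2 GHz and
 * 5 GHz bands supported by this device.
 */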
static void ath9k_init_channels_rates(struct ath_softc *sc)
{
	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
}

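/*
 * Remaining one-off initialization: ANI timer, TX power limit,
 * aggregation flags, chainmasks, antenna diversity and beacon slots.
 */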
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}
}

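/*
 * Allocate and initialize the per-device software state (struct ath_hw,
 * locks, tasklets) and bring up the hardware and driver subsystems.
 */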
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_channels_rates(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

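/*
 * Fill in the mac80211 hardware capabilities (flags, interface modes,
 * queues, supported bands) for this device.
 */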
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->rate_control_algorithm = "ath9k_rate_control";

	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

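/*
 * Bus-independent device bring-up: set up the softc, register regulatory
 * support, set up TX/RX DMA and register the device with mac80211.
 */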
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_exit_debug(sc->sc_ah);
	ath9k_hw_deinit(sc->sc_ah);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

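/*
 * Tear down the device: unregister secondary wiphys and the primary
 * mac80211 hw, then release TX/RX DMA and the softc.
 */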
void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}
	kfree(sc->sec_wiphy);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

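/*
 * Counterpart to ath_descdma_setup(): free the coherent descriptor
 * memory and the buffer array.
 */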
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath9k_debug_create_root();
	if (error) {
		printk(KERN_ERR
			"ath9k: Unable to create debugfs root: %d\n",
			error);
		goto err_rate_unregister;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_remove_root;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_remove_root:
	ath9k_debug_remove_root();
 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);