/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)    (*((struct ath_buf **)__skb->cb))

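/*
 * Map a received frame to the ieee80211_hw it should be reported on.
 * With ath9k virtual wiphy support a single radio can back several
 * wiphys; the lookup below matches the frame's addr1 against each
 * secondary wiphy's permanent address and falls back to the primary.
 */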
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
                                             struct ieee80211_hdr *hdr)
{
        struct ieee80211_hw *hw = sc->pri_wiphy->hw;
        int i;

        spin_lock_bh(&sc->wiphy_lock);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
                    == 0) {
                        hw = aphy->hw;
                        break;
                }
        }
        spin_unlock_bh(&sc->wiphy_lock);
        return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct sk_buff *skb;

        ATH_RXBUF_RESET(bf);

        ds = bf->bf_desc;
        ds->ds_link = 0; /* link to null */
        ds->ds_data = bf->bf_buf_addr;

        /* virtual addr of the beginning of the buffer. */
        skb = bf->bf_mpdu;
        BUG_ON(skb == NULL);
        ds->ds_vdata = skb->data;

        /*
         * setup rx descriptors. The rx_bufsize here tells the hardware
         * how much data it can DMA to us and that we are prepared
         * to process
         */
        ath9k_hw_setuprxdesc(ah, ds,
                             common->rx_bufsize,
                             0);

        if (sc->rx.rxlink == NULL)
                ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        else
                *sc->rx.rxlink = bf->bf_daddr;

        sc->rx.rxlink = &ds->ds_link;
        ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
        /* XXX block beacon interrupts */
        ath9k_hw_setantenna(sc->sc_ah, antenna);
        sc->rx.defant = antenna;
        sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);

        u32 rfilt, mfilt[2];

        /* configure rx filter */
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);

        /* configure bssid mask */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
                ath_hw_setbssidmask(common);

        /* configure operational mode */
        ath9k_hw_setopmode(ah);

        /* Handle any link-level address change. */
        ath9k_hw_setmac(ah, common->macaddr);

        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;
        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

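/*
 * EDMA (enhanced DMA) RX, used by EDMA-capable chips (e.g. the AR9003
 * family): instead of a linked descriptor chain, buffers are pushed
 * into a hardware FIFO and the RX status is DMA'd into the first
 * rx_status_len bytes of the buffer itself, which is why that header
 * area is zeroed and synced to the device before queueing.
 */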
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;
        struct ath_buf *bf;

        rx_edma = &sc->rx.rx_edma[qtype];
        if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
                return false;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        list_del_init(&bf->list);

        skb = bf->bf_mpdu;

        ATH_RXBUF_RESET(bf);
        memset(skb->data, 0, ah->caps.rx_status_len);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   ah->caps.rx_status_len, DMA_TO_DEVICE);

        SKB_CB_ATHBUF(skb) = bf;
        ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
        skb_queue_tail(&rx_edma->rx_fifo, skb);

        return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
                                  enum ath9k_rx_qtype qtype, int size)
{
        struct ath_rx_edma *rx_edma;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u32 nbuf = 0;

        rx_edma = &sc->rx.rx_edma[qtype];
        if (list_empty(&sc->rx.rxbuf)) {
                ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
                return;
        }

        while (!list_empty(&sc->rx.rxbuf)) {
                nbuf++;

                if (!ath_rx_edma_buf_link(sc, qtype))
                        break;

                if (nbuf >= size)
                        break;
        }
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_buf *bf;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;

        rx_edma = &sc->rx.rx_edma[qtype];

        while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
        struct ath_buf *bf;

        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                if (bf->bf_mpdu)
                        dev_kfree_skb_any(bf->bf_mpdu);
        }

        INIT_LIST_HEAD(&sc->rx.rxbuf);

        kfree(sc->rx.rx_bufptr);
        sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
        skb_queue_head_init(&rx_edma->rx_fifo);
        skb_queue_head_init(&rx_edma->rx_buffers);
        rx_edma->rx_fifo_hwsize = size;
}

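/*
 * Note: for EDMA the buffer must hold the DMA'd status area in front of
 * the frame, so rx_bufsize is rounded up from the maximum MPDU length
 * plus rx_status_len, while the size programmed into the hardware
 * excludes the status area again.
 */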
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0, i;
        u32 size;

        common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
                                     ah->caps.rx_status_len,
                                     min(common->cachelsz, (u16)64));

        ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
                                    ah->caps.rx_status_len);

        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
                               ah->caps.rx_lp_qdepth);
        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
                               ah->caps.rx_hp_qdepth);

        size = sizeof(struct ath_buf) * nbufs;
        bf = kzalloc(size, GFP_KERNEL);
        if (!bf)
                return -ENOMEM;

        INIT_LIST_HEAD(&sc->rx.rxbuf);
        sc->rx.rx_bufptr = bf;

        for (i = 0; i < nbufs; i++, bf++) {
                skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
                if (!skb) {
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                memset(skb->data, 0, common->rx_bufsize);
                bf->bf_mpdu = skb;

                bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                 common->rx_bufsize,
                                                 DMA_BIDIRECTIONAL);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(skb);
                        bf->bf_mpdu = NULL;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX init\n");
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }

        return 0;

rx_init_fail:
        ath_rx_edma_cleanup(sc);
        return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxbuflock);

        ath9k_hw_rxena(sc->sc_ah);

        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
                              sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
                              sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

        spin_unlock_bh(&sc->rx.rxbuflock);

        ath_opmode_init(sc);

        ath9k_hw_startpcureceive(sc->sc_ah);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxbuflock);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        spin_unlock_bh(&sc->rx.rxbuflock);
}

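/*
 * RX initialization: EDMA-capable hardware takes the FIFO-based setup
 * above, everything else gets a conventional descriptor ring via
 * ath_descdma_setup() with one skb mapped per descriptor.
 */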
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0;

        spin_lock_init(&sc->rx.rxflushlock);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_lock_init(&sc->rx.rxbuflock);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                return ath_rx_edma_init(sc, nbufs);
        } else {
                common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
                                             min(common->cachelsz, (u16)64));

                ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
                          common->cachelsz, common->rx_bufsize);

                /* Initialize rx descriptors */

                error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
                                          "rx", nbufs, 1);
                if (error != 0) {
                        ath_print(common, ATH_DBG_FATAL,
                                  "failed to allocate rx descriptors: %d\n",
                                  error);
                        goto err;
                }

                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                        skb = ath_rxbuf_alloc(common, common->rx_bufsize,
                                              GFP_KERNEL);
                        if (skb == NULL) {
                                error = -ENOMEM;
                                goto err;
                        }

                        bf->bf_mpdu = skb;
                        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                         common->rx_bufsize,
                                                         DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(sc->dev,
                                                       bf->bf_buf_addr))) {
                                dev_kfree_skb_any(skb);
                                bf->bf_mpdu = NULL;
                                ath_print(common, ATH_DBG_FATAL,
                                          "dma_mapping_error() on RX init\n");
                                error = -ENOMEM;
                                goto err;
                        }
                        bf->bf_dmacontext = bf->bf_buf_addr;
                }
                sc->rx.rxlink = NULL;
        }

err:
        if (error)
                ath_rx_cleanup(sc);

        return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_buf *bf;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_rx_edma_cleanup(sc);
                return;
        } else {
                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                        skb = bf->bf_mpdu;
                        if (skb) {
                                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                                 common->rx_bufsize,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
                        }
                }

                if (sc->rx.rxdma.dd_desc_len != 0)
                        ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
        }
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

        u32 rfilt;

        rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
                | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
                | ATH9K_RX_FILTER_MCAST;

        /* If not a STA, enable processing of Probe Requests */
        if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;

        /*
         * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
         * mode interface or when in monitor mode. AP mode does not need this
         * since it receives all in-BSS frames anyway.
         */
        if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
             (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
            (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
                rfilt |= ATH9K_RX_FILTER_PROM;

        if (sc->rx.rxfilter & FIF_CONTROL)
                rfilt |= ATH9K_RX_FILTER_CONTROL;

        if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
            !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
                rfilt |= ATH9K_RX_FILTER_MYBEACON;
        else
                rfilt |= ATH9K_RX_FILTER_BEACON;

        if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
             AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
            (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
            (sc->rx.rxfilter & FIF_PSPOLL))
                rfilt |= ATH9K_RX_FILTER_PSPOLL;

        if (conf_is_ht(&sc->hw->conf))
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;

        if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
                /* TODO: only needed if more than one BSSID is in use in
                 * station/adhoc mode */
                /* The following may also be needed for other older chips */
                if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
        }

        return rfilt;

#undef RX_FILTER_PRESERVE
}

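/*
 * (Re)start reception: for the legacy path every buffer is relinked into
 * the descriptor chain, the hardware is pointed at the first descriptor,
 * and only then is the PCU told to start receiving.
 */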
int ath_startrecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf, *tbf;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_edma_start_recv(sc);
                return 0;
        }

        spin_lock_bh(&sc->rx.rxbuflock);
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        sc->rx.rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
                ath_rx_buf_link(sc, bf);
        }

        /* We could have deleted elements so the list may be empty now */
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        ath9k_hw_rxena(ah);

start_recv:
        spin_unlock_bh(&sc->rx.rxbuflock);
        ath_opmode_init(sc);
        ath9k_hw_startpcureceive(ah);

        return 0;
}

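/*
 * Stop order matters here: the PCU is stopped and the filter cleared
 * before RX DMA is halted, and the return value reports whether the
 * DMA engine actually stopped.
 */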
bool ath_stoprecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        bool stopped;

        ath9k_hw_stoppcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
        stopped = ath9k_hw_stopdmarecv(ah);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
        else
                sc->rx.rxlink = NULL;

        return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxflushlock);
        sc->sc_flags |= SC_OP_RXFLUSH;
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_rx_tasklet(sc, 1, true);
        ath_rx_tasklet(sc, 1, false);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_unlock_bh(&sc->rx.rxflushlock);
}

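/*
 * Walk the beacon's information elements looking for the TIM element;
 * on a DTIM beacon (dtim_count == 0) bit 0 of the bitmap control field
 * indicates that the AP has buffered broadcast/multicast traffic.
 */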
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
        /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
        struct ieee80211_mgmt *mgmt;
        u8 *pos, *end, id, elen;
        struct ieee80211_tim_ie *tim;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        pos = mgmt->u.beacon.variable;
        end = skb->data + skb->len;

        while (pos + 2 < end) {
                id = *pos++;
                elen = *pos++;
                if (pos + elen > end)
                        break;

                if (id == WLAN_EID_TIM) {
                        if (elen < sizeof(*tim))
                                break;
                        tim = (struct ieee80211_tim_ie *) pos;
                        if (tim->dtim_count != 0)
                                break;
                        return tim->bitmap_ctrl & 0x01;
                }

                pos += elen;
        }

        return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_mgmt *mgmt;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        if (skb->len < 24 + 8 + 2 + 2)
                return;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
                return; /* not from our current AP */

        sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

        if (sc->ps_flags & PS_BEACON_SYNC) {
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_print(common, ATH_DBG_PS,
                          "Reconfigure Beacon timers based on "
                          "timestamp from the AP\n");
                ath_beacon_config(sc, NULL);
        }

        if (ath_beacon_dtim_pending_cab(skb)) {
                /*
                 * Remain awake waiting for buffered broadcast/multicast
                 * frames. If the last broadcast/multicast frame is not
                 * received properly, the next beacon frame will work as
                 * a backup trigger for returning into NETWORK SLEEP state,
                 * so we are waiting for it as well.
                 */
                ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
                          "buffered broadcast/multicast frame(s)\n");
                sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
                return;
        }

        if (sc->ps_flags & PS_WAIT_FOR_CAB) {
                /*
                 * This can happen if a broadcast frame is dropped or the AP
                 * fails to send a frame indicating that all CAB frames have
                 * been delivered.
                 */
                sc->ps_flags &= ~PS_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "PS wait for CAB frames timed out\n");
        }
}

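/*
 * Power-save bookkeeping on the RX path: depending on which PS_WAIT_*
 * flags are set, a beacon, the last buffered multicast (CAB) frame or
 * the PS-Poll response clears the corresponding flag so the chip can be
 * allowed back to sleep.
 */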
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Process Beacon and CAB receive in PS state */
        if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
            ieee80211_is_beacon(hdr->frame_control))
                ath_rx_ps_beacon(sc, skb);
        else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
                 (ieee80211_is_data(hdr->frame_control) ||
                  ieee80211_is_action(hdr->frame_control)) &&
                 is_multicast_ether_addr(hdr->addr1) &&
                 !ieee80211_has_moredata(hdr->frame_control)) {
                /*
                 * No more broadcast/multicast frames to be received at this
                 * point.
                 */
                sc->ps_flags &= ~PS_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "All PS CAB frames received, back to sleep\n");
        } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
                   !is_multicast_ether_addr(hdr->addr1) &&
                   !ieee80211_has_morefrags(hdr->frame_control)) {
                sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
                ath_print(common, ATH_DBG_PS,
                          "Going back to sleep after having received "
                          "PS-Poll data (0x%lx)\n",
                          sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                          PS_WAIT_FOR_CAB |
                                          PS_WAIT_FOR_PSPOLL_DATA |
                                          PS_WAIT_FOR_TX_ACK));
        }
}

static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
                                    struct ath_softc *sc, struct sk_buff *skb,
                                    struct ieee80211_rx_status *rxs)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Send the frame to mac80211 */
        if (is_multicast_ether_addr(hdr->addr1)) {
                int i;
                /*
                 * Deliver broadcast/multicast frames to all suitable
                 * virtual wiphys.
                 */
                /* TODO: filter based on channel configuration */
                for (i = 0; i < sc->num_sec_wiphy; i++) {
                        struct ath_wiphy *aphy = sc->sec_wiphy[i];
                        struct sk_buff *nskb;
                        if (aphy == NULL)
                                continue;
                        nskb = skb_copy(skb, GFP_ATOMIC);
                        if (!nskb)
                                continue;
                        ieee80211_rx(aphy->hw, nskb);
                }
                ieee80211_rx(sc->hw, skb);
        } else
                /* Deliver unicast frames based on receiver address */
                ieee80211_rx(hw, skb);
}

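/*
 * Poll completed frames off the head of the EDMA RX FIFO. A return of
 * -EINPROGRESS means the hardware has not finished with that buffer yet;
 * -EINVAL marks a corrupt status, in which case this buffer and the one
 * after it are recycled straight back to the hardware.
 */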
static bool ath_edma_get_buffers(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int ret;

        skb = skb_peek(&rx_edma->rx_fifo);
        if (!skb)
                return false;

        bf = SKB_CB_ATHBUF(skb);
        BUG_ON(!bf);

        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   common->rx_bufsize, DMA_FROM_DEVICE);

        ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
        if (ret == -EINPROGRESS)
                return false;

        __skb_unlink(skb, &rx_edma->rx_fifo);
        if (ret == -EINVAL) {
                /* corrupt descriptor, skip this one and the following one */
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);
                skb = skb_peek(&rx_edma->rx_fifo);
                if (!skb)
                        return true;

                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);

                __skb_unlink(skb, &rx_edma->rx_fifo);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);
        }
        skb_queue_tail(&rx_edma->rx_buffers, skb);

        return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
                                                struct ath_rx_status *rs,
                                                enum ath9k_rx_qtype qtype)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct sk_buff *skb;
        struct ath_buf *bf;

        while (ath_edma_get_buffers(sc, qtype));
        skb = __skb_dequeue(&rx_edma->rx_buffers);
        if (!skb)
                return NULL;

        bf = SKB_CB_ATHBUF(skb);
        ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
        return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
                                           struct ath_rx_status *rs)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct ath_buf *bf;
        int ret;

        if (list_empty(&sc->rx.rxbuf)) {
                sc->rx.rxlink = NULL;
                return NULL;
        }

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ds = bf->bf_desc;

        /*
         * Must provide the virtual address of the current
         * descriptor, the physical address, and the virtual
         * address of the next descriptor in the h/w chain.
         * This allows the HAL to look ahead to see if the
         * hardware is done with a descriptor by checking the
         * done bit in the following descriptor and the address
         * of the current descriptor the DMA engine is working
         * on. All this is necessary because of our use of
         * a self-linked list to avoid rx overruns.
         */
        ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
        if (ret == -EINPROGRESS) {
                struct ath_rx_status trs;
                struct ath_buf *tbf;
                struct ath_desc *tds;

                memset(&trs, 0, sizeof(trs));
                if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
                        sc->rx.rxlink = NULL;
                        return NULL;
                }

                tbf = list_entry(bf->list.next, struct ath_buf, list);

                /*
                 * On some hardware the descriptor status words could
                 * get corrupted, including the done bit. Because of
                 * this, check if the next descriptor's done bit is
                 * set or not.
                 *
                 * If the next descriptor's done bit is set, the current
                 * descriptor has been corrupted. Force s/w to discard
                 * this descriptor and continue...
                 */

                tds = tbf->bf_desc;
                ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
                if (ret == -EINPROGRESS)
                        return NULL;
        }

        if (!bf->bf_mpdu)
                return bf;

        /*
         * Synchronize the DMA transfer with CPU before
         * 1. accessing the frame
         * 2. requeueing the same buffer to h/w
         */
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   common->rx_bufsize,
                                   DMA_FROM_DEVICE);

        return bf;
}

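/*
 * Main RX processing loop, shared by the legacy and EDMA paths: fetch
 * the next completed buffer, preprocess/postprocess the frame for
 * mac80211, hand the hardware a freshly allocated replacement skb, and
 * requeue the buffer. With flush != 0 frames are recycled without being
 * processed.
 */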
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
        struct ath_buf *bf;
        struct sk_buff *skb = NULL, *requeue_skb;
        struct ieee80211_rx_status *rxs;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        /*
         * The hw can technically differ from common->hw when using ath9k
         * virtual wiphy so to account for that we iterate over the active
         * wiphys and find the appropriate wiphy and therefore hw.
         */
        struct ieee80211_hw *hw = NULL;
        struct ieee80211_hdr *hdr;
        int retval;
        bool decrypt_error = false;
        struct ath_rx_status rs;
        enum ath9k_rx_qtype qtype;
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        int dma_type;

        if (edma)
                dma_type = DMA_FROM_DEVICE;
        else
                dma_type = DMA_BIDIRECTIONAL;

        qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
        spin_lock_bh(&sc->rx.rxbuflock);

        do {
                /* If handling rx interrupt and flush is in progress => exit */
                if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
                        break;

                memset(&rs, 0, sizeof(rs));
                if (edma)
                        bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
                else
                        bf = ath_get_next_rx_buf(sc, &rs);

                if (!bf)
                        break;

                skb = bf->bf_mpdu;
                if (!skb)
                        continue;

                hdr = (struct ieee80211_hdr *) skb->data;
                rxs = IEEE80211_SKB_RXCB(skb);

                hw = ath_get_virt_hw(sc, hdr);

                ath_debug_stat_rx(sc, &rs);

                /*
                 * If we're asked to flush receive queue, directly
                 * chain it back at the queue without processing it.
                 */
                if (flush)
                        goto requeue;

                retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
                                                     rxs, &decrypt_error);
                if (retval)
                        goto requeue;

                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

                /* If there is no memory we ignore the current RX'd frame,
                 * tell hardware it can give us a new frame using the old
                 * skb and put it at the tail of the sc->rx.rxbuf list for
                 * processing. */
                if (!requeue_skb)
                        goto requeue;

                /* Unmap the frame */
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                 common->rx_bufsize,
                                 dma_type);

                skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
                if (ah->caps.rx_status_len)
                        skb_pull(skb, ah->caps.rx_status_len);

                ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
                                             rxs, decrypt_error);

                /* We will now give hardware our shiny new allocated skb */
                bf->bf_mpdu = requeue_skb;
                bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
                                                 common->rx_bufsize,
                                                 dma_type);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(requeue_skb);
                        bf->bf_mpdu = NULL;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX\n");
                        ath_rx_send_to_mac80211(hw, sc, skb, rxs);
                        break;
                }
                bf->bf_dmacontext = bf->bf_buf_addr;

                /*
                 * change the default rx antenna if rx diversity chooses the
                 * other antenna 3 times in a row.
                 */
                if (sc->rx.defant != rs.rs_antenna) {
                        if (++sc->rx.rxotherant >= 3)
                                ath_setdefantenna(sc, rs.rs_antenna);
                } else {
                        sc->rx.rxotherant = 0;
                }

                if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                             PS_WAIT_FOR_CAB |
                                             PS_WAIT_FOR_PSPOLL_DATA)))
                        ath_rx_ps(sc, skb);

                ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
                if (edma) {
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
                } else {
                        list_move_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_buf_link(sc, bf);
                }
        } while (1);

        spin_unlock_bh(&sc->rx.rxbuflock);

        return 0;
}