ath9k: Add Rx EDMA support
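
Add support for the enhanced RX DMA (EDMA) path of hardware that
advertises ATH9K_HW_CAP_EDMA. Instead of a self-linked descriptor
chain, EDMA hardware exposes two RX FIFOs (high and low priority).
Buffers are pushed into them with ath9k_hw_addrxbuf_edma() and tracked
in per-queue software shadow FIFOs; the RX status is written by the
hardware into the first rx_status_len bytes of the buffer itself, so
the buffers are mapped DMA_BIDIRECTIONAL and their status area is
zeroed before being handed back. ath_rx_tasklet() gains a parameter
that selects which of the two queues to process.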

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Vasanthakumar Thiagarajan <vasanth@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 94560e2..ffb599c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -16,6 +16,8 @@
 
 #include "ath9k.h"
 
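+/*
+ * Stash the owning ath_buf in the skb's control buffer while the skb
+ * sits in an RX FIFO, so it can be recovered on completion.
+ */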
+#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
+
 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
 					     struct ieee80211_hdr *hdr)
 {
@@ -115,6 +117,190 @@
 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
 }
 
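+/*
+ * Link one free buffer into the hardware RX FIFO of the given queue:
+ * take it off sc->rx.rxbuf, clear the in-buffer status area that the
+ * hardware will later fill in, hand its DMA address to the hardware
+ * and track the skb in the software shadow FIFO. Returns false once
+ * the FIFO is full.
+ */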
+static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_rx_edma *rx_edma;
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+
+	rx_edma = &sc->rx.rx_edma[qtype];
+	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
+		return false;
+
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	list_del_init(&bf->list);
+
+	skb = bf->bf_mpdu;
+
+	ATH_RXBUF_RESET(bf);
+	memset(skb->data, 0, ah->caps.rx_status_len);
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+				ah->caps.rx_status_len, DMA_TO_DEVICE);
+
+	SKB_CB_ATHBUF(skb) = bf;
+	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
+	skb_queue_tail(&rx_edma->rx_fifo, skb);
+
+	return true;
+}
+
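+/* Fill the hardware RX FIFO of a queue with up to 'size' free buffers. */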
+static void ath_rx_addbuffer_edma(struct ath_softc *sc,
+				  enum ath9k_rx_qtype qtype, int size)
+{
+	struct ath_rx_edma *rx_edma;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	u32 nbuf = 0;
+
+	rx_edma = &sc->rx.rx_edma[qtype];
+	if (list_empty(&sc->rx.rxbuf)) {
+		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+		return;
+	}
+
+	while (!list_empty(&sc->rx.rxbuf)) {
+		nbuf++;
+
+		if (!ath_rx_edma_buf_link(sc, qtype))
+			break;
+
+		if (nbuf >= size)
+			break;
+	}
+}
+
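+/* Drain a queue's shadow FIFO, returning the buffers to the free list. */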
+static void ath_rx_remove_buffer(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
+{
+	struct ath_buf *bf;
+	struct ath_rx_edma *rx_edma;
+	struct sk_buff *skb;
+
+	rx_edma = &sc->rx.rx_edma[qtype];
+
+	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
+		bf = SKB_CB_ATHBUF(skb);
+		BUG_ON(!bf);
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+	}
+}
+
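+/* Undo ath_rx_edma_init(): reclaim all buffers and free their skbs. */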
+static void ath_rx_edma_cleanup(struct ath_softc *sc)
+{
+	struct ath_buf *bf;
+
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+
+	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+		if (bf->bf_mpdu)
+			dev_kfree_skb_any(bf->bf_mpdu);
+	}
+
+	INIT_LIST_HEAD(&sc->rx.rxbuf);
+
+	kfree(sc->rx.rx_bufptr);
+	sc->rx.rx_bufptr = NULL;
+}
+
+static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
+{
+	skb_queue_head_init(&rx_edma->rx_fifo);
+	skb_queue_head_init(&rx_edma->rx_buffers);
+	rx_edma->rx_fifo_hwsize = size;
+}
+
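+/*
+ * Allocate and DMA-map the RX buffers for both EDMA queues. The buffer
+ * size includes rx_status_len extra bytes for the in-buffer status
+ * area, and the mapping is bidirectional because the CPU writes the
+ * cleared status area back to the device on every requeue.
+ */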
+static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int error = 0, i;
+	u32 size;
+
+	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
+				     ah->caps.rx_status_len,
+				     min(common->cachelsz, (u16)64));
+
+	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+				    ah->caps.rx_status_len);
+
+	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
+			       ah->caps.rx_lp_qdepth);
+	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
+			       ah->caps.rx_hp_qdepth);
+
+	size = sizeof(struct ath_buf) * nbufs;
+	bf = kzalloc(size, GFP_KERNEL);
+	if (!bf)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&sc->rx.rxbuf);
+	sc->rx.rx_bufptr = bf;
+
+	for (i = 0; i < nbufs; i++, bf++) {
+		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
+		if (!skb) {
+			error = -ENOMEM;
+			goto rx_init_fail;
+		}
+
+		memset(skb->data, 0, common->rx_bufsize);
+		bf->bf_mpdu = skb;
+
+		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+						 common->rx_bufsize,
+						 DMA_BIDIRECTIONAL);
+		if (unlikely(dma_mapping_error(sc->dev,
+					       bf->bf_buf_addr))) {
+			dev_kfree_skb_any(skb);
+			bf->bf_mpdu = NULL;
+			ath_print(common, ATH_DBG_FATAL,
+				  "dma_mapping_error() on RX init\n");
+			error = -ENOMEM;
+			goto rx_init_fail;
+		}
+
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+	}
+
+	return 0;
+
+rx_init_fail:
+	ath_rx_edma_cleanup(sc);
+	return error;
+}
+
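+/* Prime both hardware RX FIFOs and start the PCU receive engine. */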
+static void ath_edma_start_recv(struct ath_softc *sc)
+{
+	spin_lock_bh(&sc->rx.rxbuflock);
+
+	ath9k_hw_rxena(sc->sc_ah);
+
+	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
+			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
+
+	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
+			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
+
+	spin_unlock_bh(&sc->rx.rxbuflock);
+
+	ath_opmode_init(sc);
+
+	ath9k_hw_startpcureceive(sc->sc_ah);
+}
+
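+/* Reclaim the queued buffers once the DMA engine has been stopped. */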
+static void ath_edma_stop_recv(struct ath_softc *sc)
+{
+	spin_lock_bh(&sc->rx.rxbuflock);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+	spin_unlock_bh(&sc->rx.rxbuflock);
+}
+
 int ath_rx_init(struct ath_softc *sc, int nbufs)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -126,45 +312,51 @@
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_lock_init(&sc->rx.rxbuflock);
 
-	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-				     min(common->cachelsz, (u16)64));
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		return ath_rx_edma_init(sc, nbufs);
+	} else {
+		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+				min(common->cachelsz, (u16)64));
 
-	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-		  common->cachelsz, common->rx_bufsize);
+		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+				common->cachelsz, common->rx_bufsize);
 
-	/* Initialize rx descriptors */
+		/* Initialize rx descriptors */
 
-	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
-				  "rx", nbufs, 1);
-	if (error != 0) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "failed to allocate rx descriptors: %d\n", error);
-		goto err;
-	}
-
-	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
-		if (skb == NULL) {
-			error = -ENOMEM;
-			goto err;
-		}
-
-		bf->bf_mpdu = skb;
-		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
-						 common->rx_bufsize,
-						 DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(sc->dev,
-					       bf->bf_buf_addr))) {
-			dev_kfree_skb_any(skb);
-			bf->bf_mpdu = NULL;
+		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+				"rx", nbufs, 1);
+		if (error != 0) {
 			ath_print(common, ATH_DBG_FATAL,
-				  "dma_mapping_error() on RX init\n");
-			error = -ENOMEM;
+				  "failed to allocate rx descriptors: %d\n",
+				  error);
 			goto err;
 		}
-		bf->bf_dmacontext = bf->bf_buf_addr;
+
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
+					      GFP_KERNEL);
+			if (skb == NULL) {
+				error = -ENOMEM;
+				goto err;
+			}
+
+			bf->bf_mpdu = skb;
+			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+					common->rx_bufsize,
+					DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(sc->dev,
+							bf->bf_buf_addr))) {
+				dev_kfree_skb_any(skb);
+				bf->bf_mpdu = NULL;
+				ath_print(common, ATH_DBG_FATAL,
+					  "dma_mapping_error() on RX init\n");
+				error = -ENOMEM;
+				goto err;
+			}
+			bf->bf_dmacontext = bf->bf_buf_addr;
+		}
+		sc->rx.rxlink = NULL;
 	}
-	sc->rx.rxlink = NULL;
 
 err:
 	if (error)
@@ -180,17 +372,23 @@
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 
-	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-		skb = bf->bf_mpdu;
-		if (skb) {
-			dma_unmap_single(sc->dev, bf->bf_buf_addr,
-					 common->rx_bufsize, DMA_FROM_DEVICE);
-			dev_kfree_skb(skb);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		ath_rx_edma_cleanup(sc);
+		return;
+	} else {
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = bf->bf_mpdu;
+			if (skb) {
+				dma_unmap_single(sc->dev, bf->bf_buf_addr,
+						common->rx_bufsize,
+						DMA_FROM_DEVICE);
+				dev_kfree_skb(skb);
+			}
 		}
-	}
 
-	if (sc->rx.rxdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+		if (sc->rx.rxdma.dd_desc_len != 0)
+			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+	}
 }
 
 /*
@@ -273,6 +471,11 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_buf *bf, *tbf;
 
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		ath_edma_start_recv(sc);
+		return 0;
+	}
+
 	spin_lock_bh(&sc->rx.rxbuflock);
 	if (list_empty(&sc->rx.rxbuf))
 		goto start_recv;
@@ -306,7 +509,11 @@
 	ath9k_hw_stoppcurecv(ah);
 	ath9k_hw_setrxfilter(ah, 0);
 	stopped = ath9k_hw_stopdmarecv(ah);
-	sc->rx.rxlink = NULL;
+
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ath_edma_stop_recv(sc);
+	else
+		sc->rx.rxlink = NULL;
 
 	return stopped;
 }
@@ -315,7 +522,9 @@
 {
 	spin_lock_bh(&sc->rx.rxflushlock);
 	sc->sc_flags |= SC_OP_RXFLUSH;
-	ath_rx_tasklet(sc, 1);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ath_rx_tasklet(sc, 1, true);
+	ath_rx_tasklet(sc, 1, false);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_unlock_bh(&sc->rx.rxflushlock);
 }
@@ -469,14 +678,147 @@
 		ieee80211_rx(hw, skb);
 }
 
-int ath_rx_tasklet(struct ath_softc *sc, int flush)
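+/*
+ * Check the buffer at the head of the FIFO: if the hardware has
+ * completed its status area, move the skb to the rx_buffers completion
+ * queue. A corrupt status invalidates both this buffer and the one
+ * that follows it, so both are recycled straight back into the FIFO.
+ */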
+static bool ath_edma_get_buffers(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
 {
-#define PA2DESC(_sc, _pa)                                               \
-	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +		\
-			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
-
+	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct sk_buff *skb;
 	struct ath_buf *bf;
+	int ret;
+
+	skb = skb_peek(&rx_edma->rx_fifo);
+	if (!skb)
+		return false;
+
+	bf = SKB_CB_ATHBUF(skb);
+	BUG_ON(!bf);
+
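+	/* make the status area written by the hardware visible to the CPU */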
+	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
+				common->rx_bufsize, DMA_FROM_DEVICE);
+
+	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+	if (ret == -EINPROGRESS)
+		return false;
+
+	__skb_unlink(skb, &rx_edma->rx_fifo);
+	if (ret == -EINVAL) {
+		/* corrupt descriptor, skip this one and the following one */
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+		ath_rx_edma_buf_link(sc, qtype);
+		skb = skb_peek(&rx_edma->rx_fifo);
+		if (!skb)
+			return true;
+
+		bf = SKB_CB_ATHBUF(skb);
+		BUG_ON(!bf);
+
+		__skb_unlink(skb, &rx_edma->rx_fifo);
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+		ath_rx_edma_buf_link(sc, qtype);
+	}
+	skb_queue_tail(&rx_edma->rx_buffers, skb);
+
+	return true;
+}
+
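+/*
+ * Reap everything the hardware has finished with, then hand the oldest
+ * completed buffer to the caller along with its parsed RX status.
+ */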
+static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+						struct ath_rx_status *rs,
+						enum ath9k_rx_qtype qtype)
+{
+	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+
+	while (ath_edma_get_buffers(sc, qtype));
+	skb = __skb_dequeue(&rx_edma->rx_buffers);
+	if (!skb)
+		return NULL;
+
+	bf = SKB_CB_ATHBUF(skb);
+	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
+	return bf;
+}
+
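+/*
+ * Legacy (descriptor based) RX path, moved out of ath_rx_tasklet():
+ * return the first completed buffer on the rxbuf list, or NULL while
+ * the hardware is still working on it.
+ */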
+static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+					   struct ath_rx_status *rs)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
+	struct ath_buf *bf;
+	int ret;
+
+	if (list_empty(&sc->rx.rxbuf)) {
+		sc->rx.rxlink = NULL;
+		return NULL;
+	}
+
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	ds = bf->bf_desc;
+
+	/*
+	 * Must provide the virtual address of the current
+	 * descriptor, the physical address, and the virtual
+	 * address of the next descriptor in the h/w chain.
+	 * This allows the HAL to look ahead to see if the
+	 * hardware is done with a descriptor by checking the
+	 * done bit in the following descriptor and the address
+	 * of the current descriptor the DMA engine is working
+	 * on.  All this is necessary because of our use of
+	 * a self-linked list to avoid rx overruns.
+	 */
+	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
+	if (ret == -EINPROGRESS) {
+		struct ath_rx_status trs;
+		struct ath_buf *tbf;
+		struct ath_desc *tds;
+
+		memset(&trs, 0, sizeof(trs));
+		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+			sc->rx.rxlink = NULL;
+			return NULL;
+		}
+
+		tbf = list_entry(bf->list.next, struct ath_buf, list);
+
+		/*
+		 * On some hardware the descriptor status words could
+		 * get corrupted, including the done bit. Because of
+		 * this, check if the next descriptor's done bit is
+		 * set or not.
+		 *
+		 * If the next descriptor's done bit is set, the current
+		 * descriptor has been corrupted. Force s/w to discard
+		 * this descriptor and continue...
+		 */
+
+		tds = tbf->bf_desc;
+		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
+		if (ret == -EINPROGRESS)
+			return NULL;
+	}
+
+	if (!bf->bf_mpdu)
+		return bf;
+
+	/*
+	 * Synchronize the DMA transfer with CPU before
+	 * 1. accessing the frame
+	 * 2. requeueing the same buffer to h/w
+	 */
+	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
+			common->rx_bufsize,
+			DMA_FROM_DEVICE);
+
+	return bf;
+}
+
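+/*
+ * For EDMA hardware the tasklet is run once per RX queue, with 'hp'
+ * selecting the high priority FIFO; legacy hardware ignores 'hp' and
+ * walks its single rxbuf list.
+ */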
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+{
+	struct ath_buf *bf;
 	struct sk_buff *skb = NULL, *requeue_skb;
 	struct ieee80211_rx_status *rxs;
 	struct ath_hw *ah = sc->sc_ah;
@@ -491,7 +833,16 @@
 	int retval;
 	bool decrypt_error = false;
 	struct ath_rx_status rs;
+	enum ath9k_rx_qtype qtype;
+	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+	int dma_type;
 
+	if (edma)
+		dma_type = DMA_BIDIRECTIONAL;
+	else
+		dma_type = DMA_FROM_DEVICE;
+
+	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
 	spin_lock_bh(&sc->rx.rxbuflock);
 
 	do {
@@ -499,71 +850,19 @@
 		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
 			break;
 
-		if (list_empty(&sc->rx.rxbuf)) {
-			sc->rx.rxlink = NULL;
-			break;
-		}
-
-		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
-		ds = bf->bf_desc;
-
-		/*
-		 * Must provide the virtual address of the current
-		 * descriptor, the physical address, and the virtual
-		 * address of the next descriptor in the h/w chain.
-		 * This allows the HAL to look ahead to see if the
-		 * hardware is done with a descriptor by checking the
-		 * done bit in the following descriptor and the address
-		 * of the current descriptor the DMA engine is working
-		 * on.  All this is necessary because of our use of
-		 * a self-linked list to avoid rx overruns.
-		 */
 		memset(&rs, 0, sizeof(rs));
-		retval = ath9k_hw_rxprocdesc(ah, ds, &rs, 0);
-		if (retval == -EINPROGRESS) {
-			struct ath_rx_status trs;
-			struct ath_buf *tbf;
-			struct ath_desc *tds;
+		if (edma)
+			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
+		else
+			bf = ath_get_next_rx_buf(sc, &rs);
 
-			memset(&trs, 0, sizeof(trs));
-			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
-				sc->rx.rxlink = NULL;
-				break;
-			}
-
-			tbf = list_entry(bf->list.next, struct ath_buf, list);
-
-			/*
-			 * On some hardware the descriptor status words could
-			 * get corrupted, including the done bit. Because of
-			 * this, check if the next descriptor's done bit is
-			 * set or not.
-			 *
-			 * If the next descriptor's done bit is set, the current
-			 * descriptor has been corrupted. Force s/w to discard
-			 * this descriptor and continue...
-			 */
-
-			tds = tbf->bf_desc;
-			retval = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
-			if (retval == -EINPROGRESS) {
-				break;
-			}
-		}
+		if (!bf)
+			break;
 
 		skb = bf->bf_mpdu;
 		if (!skb)
 			continue;
 
-		/*
-		 * Synchronize the DMA transfer with CPU before
-		 * 1. accessing the frame
-		 * 2. requeueing the same buffer to h/w
-		 */
-		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
-				common->rx_bufsize,
-				DMA_FROM_DEVICE);
-
 		hdr = (struct ieee80211_hdr *) skb->data;
 		rxs =  IEEE80211_SKB_RXCB(skb);
 
@@ -597,9 +896,11 @@
 		/* Unmap the frame */
 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
 				 common->rx_bufsize,
-				 DMA_FROM_DEVICE);
+				 dma_type);
 
-		skb_put(skb, rs.rs_datalen);
+		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
+		if (ah->caps.rx_status_len)
+			skb_pull(skb, ah->caps.rx_status_len);
 
 		ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
 					     rxs, decrypt_error);
@@ -608,7 +909,7 @@
 		bf->bf_mpdu = requeue_skb;
 		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
 						 common->rx_bufsize,
-						 DMA_FROM_DEVICE);
+						 dma_type);
 		if (unlikely(dma_mapping_error(sc->dev,
 			  bf->bf_buf_addr))) {
 			dev_kfree_skb_any(requeue_skb);
@@ -639,12 +940,16 @@
 		ath_rx_send_to_mac80211(hw, sc, skb, rxs);
 
 requeue:
-		list_move_tail(&bf->list, &sc->rx.rxbuf);
-		ath_rx_buf_link(sc, bf);
+		if (edma) {
+			list_add_tail(&bf->list, &sc->rx.rxbuf);
+			ath_rx_edma_buf_link(sc, qtype);
+		} else {
+			list_move_tail(&bf->list, &sc->rx.rxbuf);
+			ath_rx_buf_link(sc, bf);
+		}
 	} while (1);
 
 	spin_unlock_bh(&sc->rx.rxbuflock);
 
 	return 0;
-#undef PA2DESC
 }