ath9k: Move acq to channel context

Maintain the per-AC lists of scheduled traffic per channel context
instead of per TX queue (axq_acq), so that ath_txq_schedule() only
services ACs that belong to the currently active channel
(sc->cur_chan).

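The xmit.c hunks below rely on a per-context array of AC lists and on a
prototype for the new ath_txq_schedule_all() helper; both live outside
this file and are not part of the diff shown here. A minimal sketch of
what they are assumed to look like (field placement, the init helper and
its name are assumptions; only the identifiers used in xmit.c are taken
from the diff):

	struct ath_chanctx {
		/* ...existing members... */

		/* one list of scheduled ath_atx_ac entries per WME
		 * access category; replaces the per-txq axq_acq list
		 * removed below
		 */
		struct list_head acq[IEEE80211_NUM_ACS];

		/* ... */
	};

	/* each list head has to be initialized when the context is
	 * set up (helper name is illustrative only)
	 */
	static void ath_chanctx_init_acq(struct ath_chanctx *ctx)
	{
		int i;

		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			INIT_LIST_HEAD(&ctx->acq[i]);
	}

	/* the new helper also needs a declaration visible to its callers */
	void ath_txq_schedule_all(struct ath_softc *sc);

ath_txq_schedule_all() has no caller in this diff; presumably it is run
once the active channel context changes, so that ACs queued on the new
context's lists get serviced. A purely hypothetical call site (function
name illustrative only, sc->cur_chan as used in the diff):

	static void example_switch_to_ctx(struct ath_softc *sc,
					  struct ath_chanctx *ctx)
	{
		sc->cur_chan = ctx;	/* make ctx the active context */
		ath_txq_schedule_all(sc);
	}
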
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 66acb2c..b2e66d2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -103,9 +103,16 @@
 		ieee80211_tx_status(sc->hw, skb);
 }
 
-static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
+static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
+			     struct ath_atx_tid *tid)
 {
 	struct ath_atx_ac *ac = tid->ac;
+	struct list_head *list;
+	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+	struct ath_chanctx *ctx = avp->chanctx;
+
+	if (!ctx)
+		return;
 
 	if (tid->sched)
 		return;
@@ -117,7 +124,9 @@
 		return;
 
 	ac->sched = true;
-	list_add_tail(&ac->list, &txq->axq_acq);
+
+	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+	list_add_tail(&ac->list, list);
 }
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -626,7 +635,7 @@
 
 		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
 		if (!an->sleeping) {
-			ath_tx_queue_tid(txq, tid);
+			ath_tx_queue_tid(sc, txq, tid);
 
 			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
 				tid->ac->clear_ps_filter = true;
@@ -1483,7 +1492,7 @@
 		ac->clear_ps_filter = true;
 
 		if (ath_tid_has_buffered(tid)) {
-			ath_tx_queue_tid(txq, tid);
+			ath_tx_queue_tid(sc, txq, tid);
 			ath_txq_schedule(sc, txq);
 		}
 
@@ -1507,7 +1516,7 @@
 	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
 
 	if (ath_tid_has_buffered(tid)) {
-		ath_tx_queue_tid(txq, tid);
+		ath_tx_queue_tid(sc, txq, tid);
 		ath_txq_schedule(sc, txq);
 	}
 
@@ -1642,7 +1651,6 @@
 		txq->axq_link = NULL;
 		__skb_queue_head_init(&txq->complete_q);
 		INIT_LIST_HEAD(&txq->axq_q);
-		INIT_LIST_HEAD(&txq->axq_acq);
 		spin_lock_init(&txq->axq_lock);
 		txq->axq_depth = 0;
 		txq->axq_ampdu_depth = 0;
@@ -1804,7 +1812,7 @@
 	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
-/* For each axq_acq entry, for each tid, try to schedule packets
+/* For each acq entry, for each tid, try to schedule packets
  * for transmit until ampdu_depth has reached min Q depth.
  */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
@@ -1812,19 +1820,25 @@
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_atx_ac *ac, *last_ac;
 	struct ath_atx_tid *tid, *last_tid;
+	struct list_head *ac_list;
 	bool sent = false;
 
+	if (txq->mac80211_qnum < 0)
+		return;
+
+	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+
 	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
-	    list_empty(&txq->axq_acq))
+	    list_empty(ac_list))
 		return;
 
 	rcu_read_lock();
 
-	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
-	while (!list_empty(&txq->axq_acq)) {
+	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
+	while (!list_empty(ac_list)) {
 		bool stop = false;
 
-		ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
 		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
 		list_del(&ac->list);
 		ac->sched = false;
@@ -1844,7 +1858,7 @@
 			 * are pending for the tid
 			 */
 			if (ath_tid_has_buffered(tid))
-				ath_tx_queue_tid(txq, tid);
+				ath_tx_queue_tid(sc, txq, tid);
 
 			if (stop || tid == last_tid)
 				break;
@@ -1852,7 +1866,7 @@
 
 		if (!list_empty(&ac->tid_q) && !ac->sched) {
 			ac->sched = true;
-			list_add_tail(&ac->list, &txq->axq_acq);
+			list_add_tail(&ac->list, ac_list);
 		}
 
 		if (stop)
@@ -1863,7 +1877,7 @@
 				break;
 
 			sent = false;
-			last_ac = list_entry(txq->axq_acq.prev,
+			last_ac = list_entry(ac_list->prev,
 					     struct ath_atx_ac, list);
 		}
 	}
@@ -1871,6 +1885,20 @@
 	rcu_read_unlock();
 }
 
+void ath_txq_schedule_all(struct ath_softc *sc)
+{
+	struct ath_txq *txq;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		txq = sc->tx.txq_map[i];
+
+		spin_lock_bh(&txq->axq_lock);
+		ath_txq_schedule(sc, txq);
+		spin_unlock_bh(&txq->axq_lock);
+	}
+}
+
 /***********/
 /* TX, DMA */
 /***********/
@@ -2198,7 +2226,7 @@
 		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
 		__skb_queue_tail(&tid->buf_q, skb);
 		if (!txctl->an->sleeping)
-			ath_tx_queue_tid(txq, tid);
+			ath_tx_queue_tid(sc, txq, tid);
 
 		ath_txq_schedule(sc, txq);
 		goto out;