/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76x2.h"
#include "mt76x2_dma.h"

20struct beacon_bc_data {
21 struct mt76x2_dev *dev;
22 struct sk_buff_head q;
23 struct sk_buff *tail[8];
24};
25
26void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
27 struct sk_buff *skb)
28{
29 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
30 struct mt76x2_dev *dev = hw->priv;
31 struct ieee80211_vif *vif = info->control.vif;
32 struct mt76_wcid *wcid = &dev->global_wcid;
33
34 if (control->sta) {
35 struct mt76x2_sta *msta;
36
37 msta = (struct mt76x2_sta *) control->sta->drv_priv;
38 wcid = &msta->wcid;
Felix Fietkau23405232018-01-18 19:01:43 +010039 }
40
41 if (vif || (!info->control.hw_key && wcid->hw_key_idx != -1)) {
Felix Fietkau7bc04212017-11-21 10:50:53 +010042 struct mt76x2_vif *mvif;
43
44 mvif = (struct mt76x2_vif *) vif->drv_priv;
45 wcid = &mvif->group_wcid;
46 }
47
48 mt76_tx(&dev->mt76, control->sta, wcid, skb);
49}
50
51void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
52{
53 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
54
55 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
56 ieee80211_free_txskb(mt76_hw(dev), skb);
57 } else {
58 ieee80211_tx_info_clear_status(info);
59 info->status.rates[0].idx = -1;
60 info->flags |= IEEE80211_TX_STAT_ACK;
61 ieee80211_tx_status(mt76_hw(dev), skb);
62 }
63}
64
65s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
66 const struct ieee80211_tx_rate *rate)
67{
68 s8 max_txpwr;
69
70 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
71 u8 mcs = ieee80211_rate_get_vht_mcs(rate);
72
73 if (mcs == 8 || mcs == 9) {
74 max_txpwr = dev->rate_power.vht[8];
75 } else {
76 u8 nss, idx;
77
78 nss = ieee80211_rate_get_vht_nss(rate);
79 idx = ((nss - 1) << 3) + mcs;
80 max_txpwr = dev->rate_power.ht[idx & 0xf];
81 }
82 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
83 max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
84 } else {
85 enum nl80211_band band = dev->mt76.chandef.chan->band;
86
87 if (band == NL80211_BAND_2GHZ) {
88 const struct ieee80211_rate *r;
89 struct wiphy *wiphy = mt76_hw(dev)->wiphy;
90 struct mt76_rate_power *rp = &dev->rate_power;
91
92 r = &wiphy->bands[band]->bitrates[rate->idx];
93 if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
94 max_txpwr = rp->cck[r->hw_value & 0x3];
95 else
96 max_txpwr = rp->ofdm[r->hw_value & 0x7];
97 } else {
98 max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
99 }
100 }
101
102 return max_txpwr;
103}
104
105s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
106{
107 txpwr = min_t(s8, txpwr, dev->txpower_conf);
108 txpwr -= (dev->target_power + dev->target_power_delta[0]);
109 txpwr = min_t(s8, txpwr, max_txpwr_adj);
110
111 if (!dev->enable_tpc)
112 return 0;
113 else if (txpwr >= 0)
114 return min_t(s8, txpwr, 7);
115 else
116 return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
117}
118
119void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
120{
121 s8 txpwr_adj;
122
123 txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
124 dev->rate_power.ofdm[4]);
125 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
126 MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
127 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
128 MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
129}
130
131static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
132{
133 int len = ieee80211_get_hdrlen_from_skb(skb);
134
135 if (len % 4 == 0)
136 return 0;
137
138 skb_push(skb, 2);
139 memmove(skb->data, skb->data + 2, len);
140
141 skb->data[len] = 0;
142 skb->data[len + 1] = 0;
143 return 2;
144}
145
146int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
147 struct sk_buff *skb, struct mt76_queue *q,
148 struct mt76_wcid *wcid, struct ieee80211_sta *sta,
149 u32 *tx_info)
150{
151 struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
152 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
153 int qsel = MT_QSEL_EDCA;
154 int ret;
155
156 if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
157 mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
158
159 mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
160
161 ret = mt76x2_insert_hdr_pad(skb);
162 if (ret < 0)
163 return ret;
164
165 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
166 qsel = MT_QSEL_MGMT;
167
168 *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
169 MT_TXD_INFO_80211;
170
Felix Fietkau23405232018-01-18 19:01:43 +0100171 if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
Felix Fietkau7bc04212017-11-21 10:50:53 +0100172 *tx_info |= MT_TXD_INFO_WIV;
173
174 return 0;
175}
176
177static void
178mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
179{
180 struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
181 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
182 struct sk_buff *skb = NULL;
183
184 if (!(dev->beacon_mask & BIT(mvif->idx)))
185 return;
186
187 skb = ieee80211_beacon_get(mt76_hw(dev), vif);
188 if (!skb)
189 return;
190
191 mt76x2_mac_set_beacon(dev, mvif->idx, skb);
192}
193
194static void
195mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
196{
197 struct beacon_bc_data *data = priv;
198 struct mt76x2_dev *dev = data->dev;
199 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
200 struct ieee80211_tx_info *info;
201 struct sk_buff *skb;
202
203 if (!(dev->beacon_mask & BIT(mvif->idx)))
204 return;
205
206 skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
207 if (!skb)
208 return;
209
210 info = IEEE80211_SKB_CB(skb);
211 info->control.vif = vif;
212 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
213 mt76_skb_set_moredata(skb, true);
214 __skb_queue_tail(&data->q, skb);
215 data->tail[mvif->idx] = skb;
216}
217
218void mt76x2_pre_tbtt_tasklet(unsigned long arg)
219{
220 struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
221 struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
222 struct beacon_bc_data data = {};
223 struct sk_buff *skb;
224 int i, nframes;
225
226 data.dev = dev;
227 __skb_queue_head_init(&data.q);
228
229 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
230 IEEE80211_IFACE_ITER_RESUME_ALL,
231 mt76x2_update_beacon_iter, dev);
232
233 do {
234 nframes = skb_queue_len(&data.q);
235 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
236 IEEE80211_IFACE_ITER_RESUME_ALL,
237 mt76x2_add_buffered_bc, &data);
238 } while (nframes != skb_queue_len(&data.q));
239
240 if (!nframes)
241 return;
242
243 for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
244 if (!data.tail[i])
245 continue;
246
247 mt76_skb_set_moredata(data.tail[i], false);
248 }
249
250 spin_lock_bh(&q->lock);
251 while ((skb = __skb_dequeue(&data.q)) != NULL) {
252 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
253 struct ieee80211_vif *vif = info->control.vif;
254 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
255
256 mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
257 }
258 spin_unlock_bh(&q->lock);
259}
260