/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76x2.h"
#include "mt76x2_dma.h"

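/*
 * Scratch state for the pre-TBTT tasklet: the queue collects buffered
 * broadcast/multicast frames from all interfaces, and tail[] tracks the
 * last frame queued per interface so its "more data" bit can be cleared.
 */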
struct beacon_bc_data {
        struct mt76x2_dev *dev;
        struct sk_buff_head q;
        struct sk_buff *tail[8];
};

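/*
 * mac80211 .tx handler: select the WCID (hardware station table entry) to
 * transmit with (the destination station's own entry, the per-interface
 * group entry, or the global fallback) and hand the frame to the common
 * mt76 TX path.
 */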
void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
               struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76x2_dev *dev = hw->priv;
        struct ieee80211_vif *vif = info->control.vif;
        struct mt76_wcid *wcid = &dev->global_wcid;

        if (control->sta) {
                struct mt76x2_sta *msta;

                msta = (struct mt76x2_sta *) control->sta->drv_priv;
                wcid = &msta->wcid;
        } else if (vif) {
                struct mt76x2_vif *mvif;

                mvif = (struct mt76x2_vif *) vif->drv_priv;
                wcid = &mvif->group_wcid;
        }

        mt76_tx(&dev->mt76, control->sta, wcid, skb);
}

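/*
 * Report a completed frame back to mac80211. For AMPDU frames the status
 * is reported separately, so the skb is only freed here; for all other
 * frames no per-packet feedback is available at this point, so a faked
 * ACKed status is reported.
 */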
void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        if (info->flags & IEEE80211_TX_CTL_AMPDU) {
                ieee80211_free_txskb(mt76_hw(dev), skb);
        } else {
                ieee80211_tx_info_clear_status(info);
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
                ieee80211_tx_status(mt76_hw(dev), skb);
        }
}

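/*
 * Look up the calibrated per-rate maximum TX power for the given transmit
 * rate (CCK, OFDM, HT or VHT) from the rate power table.
 */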
s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
                               const struct ieee80211_tx_rate *rate)
{
        s8 max_txpwr;

        if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
                u8 mcs = ieee80211_rate_get_vht_mcs(rate);

                if (mcs == 8 || mcs == 9) {
                        max_txpwr = dev->rate_power.vht[8];
                } else {
                        u8 nss, idx;

                        nss = ieee80211_rate_get_vht_nss(rate);
                        idx = ((nss - 1) << 3) + mcs;
                        max_txpwr = dev->rate_power.ht[idx & 0xf];
                }
        } else if (rate->flags & IEEE80211_TX_RC_MCS) {
                max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
        } else {
                enum nl80211_band band = dev->mt76.chandef.chan->band;

                if (band == NL80211_BAND_2GHZ) {
                        const struct ieee80211_rate *r;
                        struct wiphy *wiphy = mt76_hw(dev)->wiphy;
                        struct mt76_rate_power *rp = &dev->rate_power;

                        r = &wiphy->bands[band]->bitrates[rate->idx];
                        if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
                                max_txpwr = rp->cck[r->hw_value & 0x3];
                        else
                                max_txpwr = rp->ofdm[r->hw_value & 0x7];
                } else {
                        max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
                }
        }

        return max_txpwr;
}

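/*
 * Translate the requested TX power into the hardware power adjustment
 * field. With TPC disabled no adjustment is applied; otherwise values 0..7
 * encode a positive offset in 1 dB steps and values 8..15 encode negative
 * offsets down to -16 dB in 2 dB steps.
 */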
s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
        txpwr = min_t(s8, txpwr, dev->txpower_conf);
        txpwr -= (dev->target_power + dev->target_power_delta[0]);
        txpwr = min_t(s8, txpwr, max_txpwr_adj);

        if (!dev->enable_tpc)
                return 0;
        else if (txpwr >= 0)
                return min_t(s8, txpwr, 7);
        else
                return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
}

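/*
 * Program the TX power adjustment applied by the hardware to protection
 * and auto-generated response frames.
 */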
void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
{
        s8 txpwr_adj;

        txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
                                            dev->rate_power.ofdm[4]);
        mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
                       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
        mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
                       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
}

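/*
 * The hardware expects the frame payload to start on a 4-byte boundary.
 * If the 802.11 header length is not a multiple of 4, move the header
 * forward and insert two bytes of zero padding between header and payload.
 * Returns the number of padding bytes added.
 */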
static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
{
        int len = ieee80211_get_hdrlen_from_skb(skb);

        if (len % 4 == 0)
                return 0;

        skb_push(skb, 2);
        memmove(skb->data, skb->data + 2, len);

        skb->data[len] = 0;
        skb->data[len + 1] = 0;
        return 2;
}

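/*
 * Per-frame TX preparation for the mt76 DMA layer: re-enable transmission
 * to the target WCID for frames on the PS queue, write the TXWI
 * descriptor, pad the 802.11 header and build the DMA info word (queue
 * selection, 802.11 flag, and the WIV flag for frames without a hardware
 * key).
 */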
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
                          struct sk_buff *skb, struct mt76_queue *q,
                          struct mt76_wcid *wcid, struct ieee80211_sta *sta,
                          u32 *tx_info)
{
        struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int qsel = MT_QSEL_EDCA;
        int ret;

        if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
                mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);

        mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);

        ret = mt76x2_insert_hdr_pad(skb);
        if (ret < 0)
                return ret;

        if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
                qsel = MT_QSEL_MGMT;

        *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
                   MT_TXD_INFO_80211;

        if (!wcid || wcid->hw_key_idx == 0xff)
                *tx_info |= MT_TXD_INFO_WIV;

        return 0;
}

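/*
 * Interface iterator: fetch a fresh beacon from mac80211 for every
 * interface with beaconing enabled and write it into the corresponding
 * hardware beacon slot.
 */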
static void
mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
        struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
        struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
        struct sk_buff *skb = NULL;

        if (!(dev->beacon_mask & BIT(mvif->idx)))
                return;

        skb = ieee80211_beacon_get(mt76_hw(dev), vif);
        if (!skb)
                return;

        mt76x2_mac_set_beacon(dev, mvif->idx, skb);
}

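/*
 * Interface iterator: pull one buffered broadcast/multicast frame (if any)
 * from mac80211 for the interface, mark it as having more data pending and
 * append it to the pre-TBTT queue.
 */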
static void
mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
        struct beacon_bc_data *data = priv;
        struct mt76x2_dev *dev = data->dev;
        struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;

        if (!(dev->beacon_mask & BIT(mvif->idx)))
                return;

        skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
        if (!skb)
                return;

        info = IEEE80211_SKB_CB(skb);
        info->control.vif = vif;
        info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
        mt76_skb_set_moredata(skb, true);
        __skb_queue_tail(&data->q, skb);
        data->tail[mvif->idx] = skb;
}

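/*
 * Pre-TBTT tasklet: refresh the beacons of all active interfaces, then
 * collect buffered broadcast/multicast frames and queue them on the PS
 * queue so they are sent out after the beacon.
 */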
void mt76x2_pre_tbtt_tasklet(unsigned long arg)
{
        struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
        struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
        struct beacon_bc_data data = {};
        struct sk_buff *skb;
        int i, nframes;

        data.dev = dev;
        __skb_queue_head_init(&data.q);

        ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
                IEEE80211_IFACE_ITER_RESUME_ALL,
                mt76x2_update_beacon_iter, dev);

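        /*
         * mac80211 hands out at most one buffered frame per interface and
         * call, so keep iterating until a full pass adds no new frames.
         */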
        do {
                nframes = skb_queue_len(&data.q);
                ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
                        IEEE80211_IFACE_ITER_RESUME_ALL,
                        mt76x2_add_buffered_bc, &data);
        } while (nframes != skb_queue_len(&data.q));

        if (!nframes)
                return;

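        /* Clear the "more data" bit on the last buffered frame of each interface */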
        for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
                if (!data.tail[i])
                        continue;

                mt76_skb_set_moredata(data.tail[i], false);
        }

        spin_lock_bh(&q->lock);
        while ((skb = __skb_dequeue(&data.q)) != NULL) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                struct ieee80211_vif *vif = info->control.vif;
                struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;

                mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
        }
        spin_unlock_bh(&q->lock);
}