/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"

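/* Report completion of an off-channel transmission.  The off-channel tx
 * path waits on ar->offchan_tx_completed, so only complete it if this skb
 * is the one currently tracked in ar->offchan_tx_skb. */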
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
	if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
		return;

	/* If the original wait_for_completion() timed out before
	 * {data,mgmt}_tx_completed() was called then we could complete
	 * offchan_tx_completed for a different skb. Prevent this by using
	 * offchan_tx_skb. */
	spin_lock_bh(&ar->data_lock);
	if (ar->offchan_tx_skb != skb) {
		ath10k_warn("completed old offchannel frame\n");
		goto out;
	}

	complete(&ar->offchan_tx_completed);
	ar->offchan_tx_skb = NULL; /* just for sanity */

	ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
	spin_unlock_bh(&ar->data_lock);
}

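/* Handle an HTT tx completion: unmap the DMA buffers of the MSDU (and its
 * optional fragment list skb), report the tx status to mac80211 and release
 * the msdu_id back to the pending-tx pool. */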
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
			  const struct htt_tx_done *tx_done)
{
	struct device *dev = htt->ar->dev;
	struct ieee80211_tx_info *info;
	struct sk_buff *msdu, *txfrag;
	int ret;

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);

	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn("warning: msdu_id %d too big, ignoring\n",
			    tx_done->msdu_id);
		return;
	}

	msdu = htt->pending_tx[tx_done->msdu_id];
	txfrag = ATH10K_SKB_CB(msdu)->htt.txfrag;

	if (txfrag) {
		ret = ath10k_skb_unmap(dev, txfrag);
		if (ret)
			ath10k_warn("txfrag unmap failed (%d)\n", ret);

		dev_kfree_skb_any(txfrag);
	}

	ret = ath10k_skb_unmap(dev, msdu);
	if (ret)
		ath10k_warn("data skb unmap failed (%d)\n", ret);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);

	if (tx_done->discard) {
		ieee80211_free_txskb(htt->ar->hw, msdu);
		goto exit;
	}

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (tx_done->no_ack)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */

exit:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[tx_done->msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
	__ath10k_htt_tx_dec_pending(htt);
	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
	spin_unlock_bh(&htt->tx_lock);
}

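/* Map the 4-bit legacy rate code reported by the hardware to an index into
 * the rate table registered with mac80211 (CCK 1/2/5.5/11 Mbps followed by
 * OFDM 6/9/12/18/24/36/48/54 Mbps). */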
static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00 - 11Mbps  */
	2,	/* 0x01 - 5.5Mbps */
	1,	/* 0x02 - 2Mbps   */
	0,	/* 0x03 - 1Mbps   */
	3,	/* 0x04 - 11Mbps  */
	2,	/* 0x05 - 5.5Mbps */
	1,	/* 0x06 - 2Mbps   */
	0,	/* 0x07 - 1Mbps   */
	10,	/* 0x08 - 48Mbps  */
	8,	/* 0x09 - 24Mbps  */
	6,	/* 0x0A - 12Mbps  */
	4,	/* 0x0B - 6Mbps   */
	11,	/* 0x0C - 54Mbps  */
	9,	/* 0x0D - 36Mbps  */
	7,	/* 0x0E - 18Mbps  */
	5,	/* 0x0F - 9Mbps   */
};

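/* Decode the PHY rate information carried in the HTT rx indication
 * (legacy, HT or VHT preamble) into the mac80211 rx status. */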
static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
			     enum ieee80211_band band,
			     struct ieee80211_rx_status *status)
{
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 info0 = info->rate.info0;
	u32 info1 = info->rate.info1;
	u32 info2 = info->rate.info2;
	u8 preamble = 0;

	/* Check if valid fields */
	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
		return;

	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* The same rate table that is registered with the
			 * hardware (ath10k_rates[]) is used here.  The 5GHz
			 * band skips the four CCK rates, hence the -4. */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info1 and info2 */
		mcs = info1 & 0x1F;
		nss = mcs >> 3;
		bw = (info1 >> 7) & 1;
		sgi = (info2 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
		   TODO check this */
		mcs = (info2 >> 4) & 0x0F;
		nss = (info1 >> 10) & 0x07;
		bw = info1 & 3;
		sgi = info2 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->flag |= RX_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

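/* Deliver a received frame to mac80211: fill in the rx status (decryption
 * flags, signal, rate and channel) and hand the skb over via
 * ieee80211_rx(). */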
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_channel *ch;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;

	status = IEEE80211_SKB_RXCB(info->skb);
	memset(status, 0, sizeof(*status));

	if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(
				__le16_to_cpu(hdr->frame_control) &
				~IEEE80211_FCTL_PROTECTED);
	}

	if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (info->fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	status->signal = info->signal;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch) {
		ath10k_warn("no channel configured; ignoring frame!\n");
		dev_kfree_skb_any(info->skb);
		return;
	}

	process_rx_rates(ar, info, ch->band, status);
	status->band = ch->band;
	status->freq = ch->center_freq;

	ath10k_dbg(ATH10K_DBG_DATA,
		   "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
		   info->skb,
		   info->skb->len,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->flag & RX_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band);

	ieee80211_rx(ar->hw, info->skb);
}

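/* Look up a peer entry by vdev id and MAC address.  Must be called with
 * ar->data_lock held. */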
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
				     const u8 *addr)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (memcmp(peer->addr, addr, ETH_ALEN))
			continue;

		return peer;
	}

	return NULL;
}

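/* Look up a peer entry by firmware peer id.  Must be called with
 * ar->data_lock held. */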
static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
						  int peer_id)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list)
		if (test_bit(peer_id, peer->peer_ids))
			return peer;

	return NULL;
}

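/* Wait up to three seconds for the peer map/unmap event that creates or
 * removes the host peer entry for the given vdev id and address. */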
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);

			mapped == expect_mapped;
		}), 3*HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}

int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}

int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}

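/* Handle an HTT peer map event: create the host peer entry on first map and
 * record the firmware peer id in its id bitmap. */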
void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = ev->vdev_id;
		memcpy(peer->addr, ev->addr, ETH_ALEN);
		list_add(&peer->list, &ar->peers);
		wake_up(&ar->peer_mapping_wq);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   ev->vdev_id, ev->addr, ev->peer_id);

	set_bit(ev->peer_id, peer->peer_ids);
exit:
	spin_unlock_bh(&ar->data_lock);
}

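/* Handle an HTT peer unmap event: clear the firmware peer id and free the
 * host peer entry once no ids remain mapped to it. */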
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
	if (!peer) {
		ath10k_warn("unknown peer id %d\n", ev->peer_id);
		goto exit;
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, ev->peer_id);

	clear_bit(ev->peer_id, peer->peer_ids);

	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ar->peer_mapping_wq);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}