/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

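/* Encode a queue depth byte count into the compact factor/exponent form used
 * by the HTT tx queue state records: count ~= factor * 128 * 8^exp. Counts
 * too large to express with exp < 4 saturate to 0xff.
 */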
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

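/* Recompute the advertised depth of a single txq and update its entry in the
 * host/firmware shared tx queue state. Only relevant in push-pull mode, where
 * the firmware reads this state when deciding what to fetch from the host.
 */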
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}

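/* Commit recalculated txq entries: bump the sequence number of the shared tx
 * queue state and DMA-sync it so the device sees a consistent snapshot.
 */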
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

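/* Data tx accounting: mac80211 queues are stopped when the number of pending
 * frames hits max_num_pending_tx and woken again once it drops below it.
 */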
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

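/* Management tx accounting: if the firmware advertises a probe response
 * descriptor threshold, probe responses beyond it are refused with -EBUSY.
 */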
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

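/* msdu ids come from an IDR mapping id -> skb; tx completion events carry the
 * same id back so the frame can be looked up and unreferenced.
 */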
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr,
			  htt->frag_desc.paddr);
}

static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_KERNEL);
	if (!htt->frag_desc.vaddr) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}

	return 0;
}

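/* The tx queue state is a single host-allocated structure DMA-mapped towards
 * the device. It is only set up for firmware that advertises
 * ATH10K_FW_FEATURE_PEER_FLOW_CONTROL.
 */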
static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

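/* Allocate everything the HTT tx path needs up front: the msdu id IDR, the
 * per-msdu tx buffers, optional continuous fragment descriptors, the shared
 * tx queue state and the txdone fifo. Unwound in reverse order on failure.
 */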
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_KERNEL);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

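/* IDR iterator used during teardown: complete every still-pending msdu with a
 * discard status so its resources get released.
 */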
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	tasklet_kill(&htt->txrx_compl_task);

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_tx_free_cont_frag_desc(htt);
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

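/* Ask the firmware which HTT interface version it speaks. The answer comes
 * back asynchronously as an HTT target-to-host event.
 */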
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

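/* Tell the firmware where the continuous fragment descriptor bank lives and,
 * on peer flow control firmware, where the shared tx queue state can be read.
 */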
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

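/* Describe the rx ring to the firmware: base address, length, buffer size,
 * the fw index shadow register and the offset of each htt_rx_desc section
 * within an rx buffer.
 */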
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

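/* Reply to a tx-fetch indication: echo the token and fetch sequence number
 * back to the firmware along with the records the host is responding with.
 */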
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

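/* Pick the vdev a frame should be sent on: the scan vdev for off-channel
 * frames, the frame's own vif when set, else the monitor vdev, else vdev 0.
 */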
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

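/* Management tx path: the frame itself is DMA-mapped and a small MGMT_TX
 * command referencing its physical address (plus a copy of the first header
 * bytes) is sent over HTC.
 */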
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

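/* Main data tx path: reserve an msdu id, DMA-map the frame, fill the
 * per-msdu HTC+HTT descriptor and hand descriptor and payload to the HIF
 * layer as a scatter-gather pair, bypassing HTC (see the comment inside).
 */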
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}