/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

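/* Pending tx accounting. Each queued frame consumes one of
 * max_num_pending_tx slots; once the counter drops back below the limit
 * the mac80211 queues stopped for ATH10K_TX_PAUSE_Q_FULL are woken up
 * again. The __ variant expects htt->tx_lock to be held.
 */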
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
{
        if (limit_mgmt_desc)
                htt->num_pending_mgmt_tx--;

        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
                                      bool limit_mgmt_desc)
{
        spin_lock_bh(&htt->tx_lock);
        __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
        spin_unlock_bh(&htt->tx_lock);
}

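/* Reserve a tx slot, plus a management descriptor slot when the firmware
 * limits those. Returns -EBUSY as backpressure when a limit is hit; probe
 * responses are additionally throttled against
 * hw_params.max_probe_resp_desc_thres so a burst of them cannot exhaust
 * the management descriptors. Filling the last slot stops mac80211 queues.
 */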
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
                                     bool limit_mgmt_desc, bool is_probe_resp)
{
        struct ath10k *ar = htt->ar;
        int ret = 0;

        spin_lock_bh(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx) {
                ret = -EBUSY;
                goto exit;
        }

        if (limit_mgmt_desc) {
                if (is_probe_resp && (htt->num_pending_mgmt_tx >
                    ar->hw_params.max_probe_resp_desc_thres)) {
                        ret = -EBUSY;
                        goto exit;
                }
                htt->num_pending_mgmt_tx++;
        }

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
        spin_unlock_bh(&htt->tx_lock);
        return ret;
}

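/* Allocate an msdu_id for the given frame. The id is both the
 * firmware-visible descriptor id and the IDR key used to look the skb up
 * again on tx completion, hence the [0, max_num_pending_tx) range.
 * Callers must hold htt->tx_lock, which is also why idr_alloc() is
 * called with GFP_ATOMIC.
 */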
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
        struct ath10k *ar = htt->ar;
        int ret;

        lockdep_assert_held(&htt->tx_lock);

        ret = idr_alloc(&htt->pending_tx, skb, 0,
                        htt->max_num_pending_tx, GFP_ATOMIC);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

        return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

        idr_remove(&htt->pending_tx, msdu_id);
}

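/* Firmware with hw_params.continuous_frag_desc set expects one contiguous
 * DMA-coherent bank of struct htt_msdu_ext_desc entries, indexed by
 * msdu_id. The two helpers below manage that bank; its bus address is
 * advertised to the firmware via the frag desc bank cfg message further
 * down.
 */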
static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
{
        size_t size;

        if (!htt->frag_desc.vaddr)
                return;

        size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);

        dma_free_coherent(htt->ar->dev,
                          size,
                          htt->frag_desc.vaddr,
                          htt->frag_desc.paddr);
}

static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
        htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
                                                  &htt->frag_desc.paddr,
                                                  GFP_KERNEL);
        if (!htt->frag_desc.vaddr) {
                ath10k_err(ar, "failed to alloc fragment desc memory\n");
                return -ENOMEM;
        }

        return 0;
}

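/* For the peer flow control firmware feature the host shares a tx queue
 * state structure with the firmware. It is kzalloc()ed and streaming
 * mapped with DMA_TO_DEVICE, i.e. the device is only expected to read it.
 */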
static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
                return;

        size = sizeof(*htt->tx_q_state.vaddr);

        dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
        kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;
        int ret;

        if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
                return 0;

        htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
        htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
        htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

        size = sizeof(*htt->tx_q_state.vaddr);
        htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
        if (!htt->tx_q_state.vaddr)
                return -ENOMEM;

        htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
                                               size, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
        if (ret) {
                ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
                kfree(htt->tx_q_state.vaddr);
                return -EIO;
        }

        return 0;
}

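/* Allocate all host tx state: the msdu_id IDR, the coherent per-msdu tx
 * buffers and, depending on hardware and firmware features, the fragment
 * descriptor bank and tx queue state. Errors unwind in reverse order
 * through the labels below.
 */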
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret, size;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);

        size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
        htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
                                              &htt->txbuf.paddr,
                                              GFP_KERNEL);
        if (!htt->txbuf.vaddr) {
                ath10k_err(ar, "failed to alloc tx buffer\n");
                ret = -ENOMEM;
                goto free_idr_pending_tx;
        }

        ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
                goto free_txbuf;
        }

        ret = ath10k_htt_tx_alloc_txq(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc txq: %d\n", ret);
                goto free_frag_desc;
        }

        return 0;

free_frag_desc:
        ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
        size = htt->max_num_pending_tx *
               sizeof(struct ath10k_htt_txbuf);
        dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
                          htt->txbuf.paddr);

free_idr_pending_tx:
        idr_destroy(&htt->pending_tx);

        return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
        struct ath10k *ar = ctx;
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};

        ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

        tx_done.discard = 1;
        tx_done.msdu_id = msdu_id;

        ath10k_txrx_tx_unref(htt, &tx_done);

        return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
        int size;

        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);

        if (htt->txbuf.vaddr) {
                size = htt->max_num_pending_tx *
                       sizeof(struct ath10k_htt_txbuf);
                dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
                                  htt->txbuf.paddr);
        }

        ath10k_htt_tx_free_txq(htt);
        ath10k_htt_tx_free_cont_frag_desc(htt);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 8 bit masks so no need to worry
         * about endian support */
        req->upload_types[0] = mask;
        req->reset_types[0] = mask;
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

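/* Point the firmware at the fragment descriptor bank. A single bank
 * covering msdu_ids 0 through max_num_pending_tx - 1 is configured; the
 * q_state fields are marked valid in the info word only when the peer
 * flow control feature is in use.
 */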
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_frag_desc_bank_cfg *cfg;
        int ret, size;
        u8 info;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

        info = 0;
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

        if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

        cfg = &cmd->frag_desc_bank_cfg;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
        cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
        cfg->bank_id[0].bank_min_id = 0;
        cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                                                    1);

        cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
        cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
        cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
        cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
        cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

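/* Configure the rx ring for low latency (LL) firmware: where the ring and
 * its shadow index live, the buffer size, and which sections of the rx
 * descriptor the firmware should fill in, with each offset expressed in
 * 4-byte words of struct htt_rx_desc.
 */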
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

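/* Pick the vdev a frame is transmitted on: off-channel frames use the
 * scan vdev, otherwise the originating interface's vdev, then the monitor
 * vdev (if running), and finally vdev 0. The helper below it likewise
 * maps a frame to its HTT extended tid.
 */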
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
        struct ath10k_vif *arvif = (void *)cb->vif->drv_priv;

        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
                return ar->scan.vdev_id;
        else if (cb->vif)
                return arvif->vdev_id;
        else if (ar->monitor_started)
                return ar->monitor_vdev_id;
        else
                return 0;
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
        struct ieee80211_hdr *hdr = (void *)skb->data;
        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

        if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
                return HTT_DATA_TX_EXT_TID_MGMT;
        else if (cb->flags & ATH10K_SKB_F_QOS)
                return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
        else
                return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

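/* Transmit a management frame via the dedicated HTT mgmt tx command. The
 * frame itself is DMA mapped and passed by bus address; only the first
 * HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of it are copied into the command.
 */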
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        int len = 0;
        int msdu_id = -1;
        int res;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        bool limit_mgmt_desc = false;
        bool is_probe_resp = false;

        if (ar->hw_params.max_probe_resp_desc_thres) {
                limit_mgmt_desc = true;

                if (ieee80211_is_probe_resp(hdr->frame_control))
                        is_probe_resp = true;
        }

        res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);

        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
                goto err_tx_dec;

        msdu_id = res;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, len);

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
        return res;
}

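/* Transmit a data frame. The prebuilt HTC/HTT headers in the per-msdu_id
 * tx buffer and the first prefetch_len bytes of the frame are handed to
 * the HIF layer as two scatter-gather items, bypassing the HTC tx path
 * entirely (see the comment in the middle of this function).
 */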
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
                  struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
        struct ath10k_htt_txbuf *txbuf;
        struct htt_data_tx_desc_frag *frags;
        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
        int prefetch_len;
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
        u16 freq = 0;
        u32 frags_paddr = 0;
        u32 txbuf_paddr;
        struct htt_msdu_ext_desc *ext_desc = NULL;
        bool limit_mgmt_desc = false;
        bool is_probe_resp = false;

        if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
            ar->hw_params.max_probe_resp_desc_thres) {
                limit_mgmt_desc = true;

                if (ieee80211_is_probe_resp(hdr->frame_control))
                        is_probe_resp = true;
        }

        res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
        if (res)
                goto err;

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
                goto err_tx_dec;

        msdu_id = res;

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        txbuf = &htt->txbuf.vaddr[msdu_id];
        txbuf_paddr = htt->txbuf.paddr +
                      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
            ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
                   txmode == ATH10K_HW_TXRX_RAW &&
                   ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_msdu_id;
        }

        if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
                freq = ar->scan.roc_freq;

        switch (txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                /* pass through */
        case ATH10K_HW_TXRX_ETHERNET:
                if (ar->hw_params.continuous_frag_desc) {
                        memset(&htt->frag_desc.vaddr[msdu_id], 0,
                               sizeof(struct htt_msdu_ext_desc));
                        frags = (struct htt_data_tx_desc_frag *)
                                &htt->frag_desc.vaddr[msdu_id].frags;
                        ext_desc = &htt->frag_desc.vaddr[msdu_id];
                        frags[0].tword_addr.paddr_lo =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].tword_addr.paddr_hi = 0;
                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

                        frags_paddr = htt->frag_desc.paddr +
                                      (sizeof(struct htt_msdu_ext_desc) * msdu_id);
                } else {
                        frags = txbuf->frags;
                        frags[0].dword_addr.paddr =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
                        frags[1].dword_addr.paddr = 0;
                        frags[1].dword_addr.len = 0;

                        frags_paddr = txbuf_paddr;
                }
                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

                frags_paddr = skb_cb->paddr;
                break;
        }

        /* Normally all commands go through HTC which manages tx credits for
         * each endpoint and notifies when tx is completed.
         *
         * HTT endpoint is creditless so there's no need to care about HTC
         * flags. In that case it is trivial to fill the HTC header here.
         *
         * MSDU transmission is considered completed upon HTT event. This
         * implies no relevant resources can be freed until after the event is
         * received. That's why HTC tx completion handler itself is ignored by
         * setting NULL to transfer_context for all sg items.
         *
         * There is simply no point in pushing HTT TX_FRM through HTC tx path
         * as it's a waste of resources. By bypassing HTC it is possible to
         * avoid extra memory allocations, compress data structures and thus
         * improve performance. */

        txbuf->htc_hdr.eid = htt->eid;
        txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
                                           sizeof(txbuf->cmd_tx) +
                                           prefetch_len);
        txbuf->htc_hdr.flags = 0;

        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
                if (ar->hw_params.continuous_frag_desc)
                        ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
        }

        /* Prevent firmware from sending up tx inspection requests. There's
         * nothing ath10k can do with frames requested for inspection so force
         * it to simply rely on a regular tx completion with discard status.
         */
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

        txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        txbuf->cmd_tx.flags0 = flags0;
        txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
        txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
        txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
        txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        if (ath10k_mac_tx_frm_has_freq(ar)) {
                txbuf->cmd_tx.offchan_tx.peerid =
                        __cpu_to_le16(HTT_INVALID_PEERID);
                txbuf->cmd_tx.offchan_tx.freq =
                        __cpu_to_le16(freq);
        } else {
                txbuf->cmd_tx.peerid =
                        __cpu_to_le32(HTT_INVALID_PEERID);
        }

        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
                   flags0, flags1, msdu->len, msdu_id, frags_paddr,
                   (u32)skb_cb->paddr, vdev_id, tid, freq);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
                        msdu->data, msdu->len);
        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

        sg_items[0].transfer_id = 0;
        sg_items[0].transfer_context = NULL;
        sg_items[0].vaddr = &txbuf->htc_hdr;
        sg_items[0].paddr = txbuf_paddr +
                            sizeof(txbuf->frags);
        sg_items[0].len = sizeof(txbuf->htc_hdr) +
                          sizeof(txbuf->cmd_hdr) +
                          sizeof(txbuf->cmd_tx);

        sg_items[1].transfer_id = 0;
        sg_items[1].transfer_context = NULL;
        sg_items[1].vaddr = msdu->data;
        sg_items[1].paddr = skb_cb->paddr;
        sg_items[1].len = prefetch_len;

        res = ath10k_hif_tx_sg(htt->ar,
                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
                               sg_items, ARRAY_SIZE(sg_items));
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
        return res;
}