/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

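/* Pending tx accounting: num_pending_tx counts all outstanding HTT tx
 * descriptors; num_pending_mgmt_tx additionally counts management frames
 * when the firmware advertises a probe response descriptor limit. Once the
 * pool is exhausted the mac80211 queues are stopped with
 * ATH10K_TX_PAUSE_Q_FULL and resumed as soon as a descriptor is returned.
 */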
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
{
	if (limit_mgmt_desc)
		htt->num_pending_mgmt_tx--;

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
				      bool limit_mgmt_desc)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
				     bool limit_mgmt_desc, bool is_probe_resp)
{
	struct ath10k *ar = htt->ar;
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	if (limit_mgmt_desc) {
		if (is_probe_resp && (htt->num_pending_mgmt_tx >
		    ar->hw_params.max_probe_resp_desc_thres)) {
			ret = -EBUSY;
			goto exit;
		}
		htt->num_pending_mgmt_tx++;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

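/* msdu ids are allocated from an IDR that maps each id back to the original
 * sk_buff, so the tx completion path can recover the frame from the id
 * reported by the firmware. Callers must hold htt->tx_lock.
 */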
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

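/* Allocate the per-MSDU tx descriptor pool: a coherent DMA block of
 * ath10k_htt_txbuf entries (HTC/HTT headers plus fragment list) and, on
 * targets with continuous_frag_desc, a parallel bank of htt_msdu_ext_desc
 * fragment descriptors, both indexed by msdu id.
 */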
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_DMA);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_txbuf;
	}

skip_frag_desc_alloc:
	return 0;

free_txbuf:
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}

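/* On teardown any msdu ids still sitting in the IDR are completed with
 * discard status so their skbs and DMA mappings are released before the
 * descriptor pools are freed.
 */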
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

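/* The helpers below build host-to-target (H2T) HTT control messages: each
 * allocates an HTC skb, fills struct htt_cmd with the message type and
 * payload, and hands it to HTC on the HTT endpoint. On send failure the skb
 * is freed locally and the error is propagated to the caller.
 */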
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

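/* Tell the firmware where the fragment descriptor bank lives. Only needed
 * on targets with continuous_frag_desc; the single bank covers one
 * htt_msdu_ext_desc per possible msdu id.
 */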
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int ret, size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
	cmd->frag_desc_bank_cfg.info = 0;
	cmd->frag_desc_bank_cfg.num_banks = 1;
	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
		__cpu_to_le32(htt->frag_desc.paddr);
	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
		__cpu_to_le16(htt->max_num_pending_tx - 1);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

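/* Configure the low-latency rx ring: the message carries the ring base and
 * alloc-index shadow register addresses plus the offsets (in 4-byte words)
 * of the rx descriptor sections, so firmware and host agree on the rx
 * buffer layout.
 */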
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

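/* Override the firmware's default A-MPDU/A-MSDU subframe limits with the
 * caller-supplied values (valid ranges 1-64 and 1-31 respectively).
 */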
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

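/* Management tx path: the frame is DMA-mapped and described to the firmware
 * with an HTT_H2T_MSG_TYPE_MGMT_TX command sent through the regular HTC
 * path; only the first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the frame are
 * copied into the command itself.
 */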
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);

	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}

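/* Data tx path: the prebuilt per-msdu tx buffer (HTC header, HTT command
 * and fragment list) and the frame payload are handed to the HIF layer as
 * a two-element scatter-gather list, bypassing HTC credit accounting as
 * explained in the comment inside the function.
 */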
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u32 frags_paddr = 0;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
	    ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
	skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
		(sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!skb_cb->htt.nohwcrypt &&
		   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * The HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is
	 * ignored by setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through the HTC tx
	 * path as it's a waste of resources. By bypassing HTC it is possible
	 * to avoid extra memory allocations, compress data structures and
	 * thus improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (skb_cb->htt.nohwcrypt)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}