/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
{
        if (limit_mgmt_desc)
                htt->num_pending_mgmt_tx--;

        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
                                      bool limit_mgmt_desc)
{
        spin_lock_bh(&htt->tx_lock);
        __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
        spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
                                     bool limit_mgmt_desc, bool is_probe_resp)
{
        struct ath10k *ar = htt->ar;
        int ret = 0;

        spin_lock_bh(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx) {
                ret = -EBUSY;
                goto exit;
        }

        if (limit_mgmt_desc) {
                if (is_probe_resp && (htt->num_pending_mgmt_tx >
                    ar->hw_params.max_probe_resp_desc_thres)) {
                        ret = -EBUSY;
                        goto exit;
                }
                htt->num_pending_mgmt_tx++;
        }

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
        spin_unlock_bh(&htt->tx_lock);
        return ret;
}

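/* Usage sketch (illustrative only, not driver code): the counters above
 * act as a simple high-water-mark flow control for the shared HTT tx
 * ring. With max_num_pending_tx == N, the Nth successful increment
 * pauses the mac80211 queues and the matching decrement back to N - 1
 * unpauses them:
 *
 *      res = ath10k_htt_tx_inc_pending(htt, false, false);
 *      if (res)        // -EBUSY when the ring is full
 *              return res;
 *      ...
 *      ath10k_htt_tx_dec_pending(htt, false);
 *
 * Management frames are additionally capped by
 * ar->hw_params.max_probe_resp_desc_thres when limit_mgmt_desc is set.
 */
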
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
        struct ath10k *ar = htt->ar;
        int ret;

        lockdep_assert_held(&htt->tx_lock);

        ret = idr_alloc(&htt->pending_tx, skb, 0,
                        htt->max_num_pending_tx, GFP_ATOMIC);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

        return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

        idr_remove(&htt->pending_tx, msdu_id);
}

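/* Illustrative sketch (hedged): msdu ids come from a plain kernel IDR
 * so a tx completion can be mapped back to its skb. Callers hold
 * htt->tx_lock, hence GFP_ATOMIC in idr_alloc(). Roughly:
 *
 *      spin_lock_bh(&htt->tx_lock);
 *      msdu_id = ath10k_htt_tx_alloc_msdu_id(htt, skb);  // in [0, max)
 *      spin_unlock_bh(&htt->tx_lock);
 *      ...
 *      // later, on completion (see ath10k_txrx_tx_unref() in txrx.c):
 *      skb = idr_find(&htt->pending_tx, msdu_id);
 */
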
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret, size;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);

        size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
        htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
                                              &htt->txbuf.paddr,
                                              GFP_DMA);
        if (!htt->txbuf.vaddr) {
                ath10k_err(ar, "failed to alloc tx buffer\n");
                ret = -ENOMEM;
                goto free_idr_pending_tx;
        }

        if (!ar->hw_params.continuous_frag_desc)
                goto skip_frag_desc_alloc;

        size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
        htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
                                                  &htt->frag_desc.paddr,
                                                  GFP_DMA);
        if (!htt->frag_desc.vaddr) {
                ath10k_warn(ar, "failed to alloc fragment desc memory\n");
                ret = -ENOMEM;
                goto free_txbuf;
        }

skip_frag_desc_alloc:
        return 0;

free_txbuf:
        size = htt->max_num_pending_tx *
               sizeof(struct ath10k_htt_txbuf);
        dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
                          htt->txbuf.paddr);
free_idr_pending_tx:
        idr_destroy(&htt->pending_tx);
        return ret;
}

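/* Sizing note (hedged): both pools are sized so that msdu_id can be
 * used as a direct index, one descriptor per possible in-flight frame:
 *
 *      txbuf[i]     at txbuf.paddr     + i * sizeof(struct ath10k_htt_txbuf)
 *      frag_desc[i] at frag_desc.paddr + i * sizeof(struct htt_msdu_ext_desc)
 *
 * They are DMA-coherent because the target reads them in place; the
 * frag_desc pool exists only on targets with continuous_frag_desc.
 */
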
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
        struct ath10k *ar = ctx;
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};

        ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

        tx_done.discard = 1;
        tx_done.msdu_id = msdu_id;

        ath10k_txrx_tx_unref(htt, &tx_done);

        return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
        int size;

        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);

        if (htt->txbuf.vaddr) {
                size = htt->max_num_pending_tx *
                       sizeof(struct ath10k_htt_txbuf);
                dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
                                  htt->txbuf.paddr);
        }

        if (htt->frag_desc.vaddr) {
                size = htt->max_num_pending_tx *
                       sizeof(struct htt_msdu_ext_desc);
                dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
                                  htt->frag_desc.paddr);
        }
}

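/* Teardown note (hedged): before the pools are freed, every msdu still
 * in the IDR is completed through the normal path with a faked
 * "discard" status, i.e. for each pending id:
 *
 *      tx_done.discard = 1;
 *      tx_done.msdu_id = msdu_id;
 *      ath10k_txrx_tx_unref(htt, &tx_done);
 *
 * so skbs are unmapped and freed exactly once even on driver teardown.
 */
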
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 8-bit masks, so there is no need
         * to worry about endianness */
        req->upload_types[0] = mask;
        req->reset_types[0] = mask;
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

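/* Worked example (hedged): the 64-bit cookie is echoed back by firmware
 * in the stats confirmation; it is carried as two little-endian 32-bit
 * halves. For cookie = 0x1122334455667788ULL:
 *
 *      req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);  // 0x55667788
 *      req->cookie_msb = cpu_to_le32(cookie >> 32);         // 0x11223344
 *
 * (masking with 0xffffffff00000000ULL before the shift, as above, is
 * equivalent to the plain shift).
 */
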
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int ret, size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
        cmd->frag_desc_bank_cfg.info = 0;
        cmd->frag_desc_bank_cfg.num_banks = 1;
        cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
        cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
                __cpu_to_le32(htt->frag_desc.paddr);
        cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
        cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
                __cpu_to_le16(htt->max_num_pending_tx - 1);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

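/* Layout note (hedged): a single bank of fragment descriptors is
 * registered, with descriptor ids mapping 1:1 onto msdu ids, so for
 * msdu_id i the target dereferences:
 *
 *      htt->frag_desc.paddr + i * sizeof(struct htt_msdu_ext_desc)
 *
 * which is why bank_max_id is max_num_pending_tx - 1.
 */
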
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

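/* Offset note (hedged): desc_offset() converts byte offsets within
 * struct htt_rx_desc into the 4-byte words the firmware expects. For
 * example, if offsetof(struct htt_rx_desc, msdu_payload) happened to be
 * 240 bytes (a hypothetical value), the ring field would carry 60.
 */
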
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

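/* Usage sketch (hedged): restoring the documented firmware defaults
 * would look like:
 *
 *      ret = ath10k_htt_h2t_aggr_cfg_msg(htt, 64, 3);
 *
 * i.e. 64 A-MPDU subframes and 3 A-MSDU subframes; values outside
 * 1..64 (A-MPDU) or 1..31 (A-MSDU) are rejected with -EINVAL before
 * any message is built.
 */
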
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        bool limit_mgmt_desc = false;
        bool is_probe_resp = false;

        if (ar->hw_params.max_probe_resp_desc_thres) {
                limit_mgmt_desc = true;

                if (ieee80211_is_probe_resp(hdr->frame_control))
                        is_probe_resp = true;
        }

        res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
                goto err_tx_dec;

        msdu_id = res;

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, len);

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.txbuf = NULL;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
        return res;
}

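/* Unwind note (hedged): the error labels above release resources in
 * reverse order of acquisition: DMA mapping -> HTC skb -> msdu id ->
 * pending counters. Each label falls through to all later ones, so a
 * failure at any step leaves the accounting balanced.
 */
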
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
        struct htt_data_tx_desc_frag *frags;
        u8 vdev_id = skb_cb->vdev_id;
        u8 tid = skb_cb->htt.tid;
        int prefetch_len;
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
        u32 frags_paddr = 0;
        struct htt_msdu_ext_desc *ext_desc = NULL;
        bool limit_mgmt_desc = false;
        bool is_probe_resp = false;

        if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
            ar->hw_params.max_probe_resp_desc_thres) {
                limit_mgmt_desc = true;

                if (ieee80211_is_probe_resp(hdr->frame_control))
                        is_probe_resp = true;
        }

        res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
        if (res)
                goto err;

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
                goto err_tx_dec;

        msdu_id = res;

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
        skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
                (sizeof(struct ath10k_htt_txbuf) * msdu_id);

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
            ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        } else if (!skb_cb->htt.nohwcrypt &&
                   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
                   ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_msdu_id;
        }

        switch (skb_cb->txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                /* fall through */
        case ATH10K_HW_TXRX_ETHERNET:
                if (ar->hw_params.continuous_frag_desc) {
                        memset(&htt->frag_desc.vaddr[msdu_id], 0,
                               sizeof(struct htt_msdu_ext_desc));
                        frags = (struct htt_data_tx_desc_frag *)
                                &htt->frag_desc.vaddr[msdu_id].frags;
                        ext_desc = &htt->frag_desc.vaddr[msdu_id];
                        frags[0].tword_addr.paddr_lo =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].tword_addr.paddr_hi = 0;
                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

                        frags_paddr = htt->frag_desc.paddr +
                                (sizeof(struct htt_msdu_ext_desc) * msdu_id);
                } else {
                        frags = skb_cb->htt.txbuf->frags;
                        frags[0].dword_addr.paddr =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
                        frags[1].dword_addr.paddr = 0;
                        frags[1].dword_addr.len = 0;

                        frags_paddr = skb_cb->htt.txbuf_paddr;
                }
                flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

                frags_paddr = skb_cb->paddr;
                break;
        }

        /* Normally all commands go through HTC which manages tx credits for
         * each endpoint and notifies when tx is completed.
         *
         * HTT endpoint is creditless so there's no need to care about HTC
         * flags. In that case it is trivial to fill the HTC header here.
         *
         * MSDU transmission is considered completed upon HTT event. This
         * implies no relevant resources can be freed until after the event is
         * received. That's why the HTC tx completion handler itself is ignored
         * by setting NULL to transfer_context for all sg items.
         *
         * There is simply no point in pushing HTT TX_FRM through the HTC tx
         * path as it's a waste of resources. By bypassing HTC it is possible
         * to avoid extra memory allocations, compress data structures and
         * thus improve performance. */

        skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
        skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
                        sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                        sizeof(skb_cb->htt.txbuf->cmd_tx) +
                        prefetch_len);
        skb_cb->htt.txbuf->htc_hdr.flags = 0;

        if (skb_cb->htt.nohwcrypt)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        if (!skb_cb->is_protected)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
                if (ar->hw_params.continuous_frag_desc)
                        ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
        }

        /* Prevent firmware from sending up tx inspection requests. There's
         * nothing ath10k can do with frames requested for inspection so force
         * it to simply report a regular tx completion with discard status.
         */
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

        skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
        skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
        skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
        skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
        skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
        skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
                   flags0, flags1, msdu->len, msdu_id, frags_paddr,
                   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
                        msdu->data, msdu->len);
        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

        sg_items[0].transfer_id = 0;
        sg_items[0].transfer_context = NULL;
        sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
        sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
                            sizeof(skb_cb->htt.txbuf->frags);
        sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_tx);

        sg_items[1].transfer_id = 0;
        sg_items[1].transfer_context = NULL;
        sg_items[1].vaddr = msdu->data;
        sg_items[1].paddr = skb_cb->paddr;
        sg_items[1].len = prefetch_len;

        res = ath10k_hif_tx_sg(htt->ar,
                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
                               sg_items, ARRAY_SIZE(sg_items));
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
        return res;
}
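
/* Data path note (hedged): the two sg items hand the copy engine one
 * gather list per frame:
 *
 *      sg_items[0]: htc_hdr + cmd_hdr + cmd_tx inside the coherent txbuf
 *      sg_items[1]: the first prefetch_len bytes of the msdu itself
 *
 * The target fetches the remainder of the frame on its own through
 * frags_paddr, which is why only a prefix of the msdu is pushed here.
 */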