/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

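/* Release one slot from the pending tx counters; callers must hold
 * htt->tx_lock. Waking the mac80211 queues when the count drops just below
 * the limit undoes the pause taken in the inc_pending path.
 */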
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
{
	if (limit_mgmt_desc)
		htt->num_pending_mgmt_tx--;

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
				      bool limit_mgmt_desc)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
	spin_unlock_bh(&htt->tx_lock);
}

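/* Reserve a pending tx slot before a frame is handed to the firmware. Fails
 * with -EBUSY when the shared descriptor pool is exhausted or, on hardware
 * with a probe response threshold, when too many probe responses are already
 * in flight. Pauses the mac80211 queues once the pool becomes full.
 */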
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
				     bool limit_mgmt_desc, bool is_probe_resp)
{
	struct ath10k *ar = htt->ar;
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	if (limit_mgmt_desc) {
		if (is_probe_resp && (htt->num_pending_mgmt_tx >
		    ar->hw_params.max_probe_resp_desc_thres)) {
			ret = -EBUSY;
			goto exit;
		}
		htt->num_pending_mgmt_tx++;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

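/* Allocate an msdu id for the skb from the pending_tx IDR so the later tx
 * completion event can be matched back to this frame.
 */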
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr,
			  htt->frag_desc.paddr);
}

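/* Allocate the DMA-coherent fragment descriptor array (one htt_msdu_ext_desc
 * per possible pending frame) used when the firmware supports continuous
 * frag descriptors.
 */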
static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_KERNEL);
	if (!htt->frag_desc.vaddr) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}

	return 0;
}

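/* Allocate all host-side tx state: the msdu id IDR, the DMA-coherent tx
 * descriptor buffer and, where applicable, the fragment descriptor array.
 */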
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_KERNEL);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	return 0;

free_txbuf:
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

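/* IDR iterator callback used at teardown: complete every frame still pending
 * as discarded so its msdu id, DMA mapping and skb are released.
 */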
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %d\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	ath10k_htt_tx_free_cont_frag_desc(htt);
}

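/* HTC/HIF level tx completion handlers. Actual MSDU completion is reported
 * via HTT events, so all that is left to do here is freeing the transport
 * skb.
 */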
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

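/* Request the HTT protocol version from the firmware; the reply is handled
 * in the HTT rx path.
 */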
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

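/* Point the firmware at the host's fragment descriptor bank. Only meaningful
 * for firmware with continuous frag descriptor support; a single bank
 * covering all msdu ids is configured.
 */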
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int ret, size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
	cmd->frag_desc_bank_cfg.info = 0;
	cmd->frag_desc_bank_cfg.num_banks = 1;
	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
		__cpu_to_le32(htt->frag_desc.paddr);
	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
		__cpu_to_le16(htt->max_num_pending_tx - 1);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

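/* Configure the LL (low latency) rx ring: ring location and size, which rx
 * descriptor parts the firmware should fill in, and the offsets of each
 * sub-descriptor within struct htt_rx_desc.
 */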
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

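/* Override the firmware's default A-MPDU/A-MSDU subframe limits. */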
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

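/* Resolve the vdev id for a frame: the scan vdev for off-channel tx,
 * otherwise the transmitting interface's vdev, falling back to the monitor
 * vdev (or 0) when no vif is attached.
 */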
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif = (void *)cb->vif->drv_priv;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		return ar->scan.vdev_id;
	else if (cb->vif)
		return arvif->vdev_id;
	else if (ar->monitor_started)
		return ar->monitor_vdev_id;
	else
		return 0;
}

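/* Map a frame to an HTT extended TID: management frames use the dedicated
 * management TID, QoS data derives the TID from the skb priority, everything
 * else goes out as non-QoS/mcast/bcast.
 */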
static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

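/* Management frame tx via the HTT_H2T_MSG_TYPE_MGMT_TX command: the frame
 * itself is DMA-mapped and a small command skb referencing it (plus a copy
 * of the first header bytes) is sent through HTC.
 */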
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);

	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}

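/* Main data tx path: build the HTT tx descriptor in the preallocated
 * per-msdu txbuf and hand it, together with the frame, as a two-entry
 * scatter-gather list directly to HIF, bypassing HTC (see the comment
 * further down for the rationale).
 */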
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
	    ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	    ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}