/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

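/* Unlocked variant of the pending tx counter decrement. Wakes the mac80211
 * queues again once a slot frees up after they were stopped at the
 * max_num_pending_tx limit. Callers are expected to hold htt->tx_lock; the
 * ath10k_htt_tx_dec_pending() wrapper below takes the lock itself.
 */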
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ieee80211_stop_queues(htt->ar->hw);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

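/* Allocate a free MSDU id from the used_msdu_ids bitmap. Returns the id on
 * success or -ENOBUFS when all ids are in use. Must be called with
 * htt->tx_lock held.
 */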
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
	int msdu_id;

	lockdep_assert_held(&htt->tx_lock);

	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
				      htt->max_num_pending_tx);
	if (msdu_id == htt->max_num_pending_tx)
		return -ENOBUFS;

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
	__set_bit(msdu_id, htt->used_msdu_ids);
	return msdu_id;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!test_bit(msdu_id, htt->used_msdu_ids))
		ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
	__clear_bit(msdu_id, htt->used_msdu_ids);
}

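/* Set up HTT tx state: pick the pending tx limit based on the firmware
 * variant and allocate the pending_tx pointer array and the used_msdu_ids
 * bitmap backing MSDU id allocation.
 */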
int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
	spin_lock_init(&htt->tx_lock);
	init_waitqueue_head(&htt->empty_tx_wq);

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
	else
		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
				  htt->max_num_pending_tx, GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
				     BITS_TO_LONGS(htt->max_num_pending_tx),
				     GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}

static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	/* No locks needed. Called after communication with the device has
	 * been stopped. */

	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

void ath10k_htt_tx_detach(struct ath10k_htt *htt)
{
	ath10k_htt_tx_cleanup_pending(htt);
	kfree(htt->pending_tx);
	kfree(htt->used_msdu_ids);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

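/* Build and send an HTT version request command to the target. */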
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

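/* Request target statistics selected by the 8-bit type mask. The 64-bit
 * cookie is handed to the target as two 32-bit halves so the reply can be
 * associated with this request.
 */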
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn("failed to send htt type stats request: %d", ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

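/* Configure the target's rx ring: tell the firmware where the host rx ring
 * and the fw index shadow register live, which rx descriptor sections to
 * fill in, and the per-field offsets within struct htt_rx_desc.
 */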
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

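/* Transmit a management frame using the dedicated HTT_H2T_MSG_TYPE_MGMT_TX
 * command: reserve a tx slot and MSDU id, DMA-map the frame and send an HTC
 * message carrying its physical address, length and the start of the 802.11
 * header.
 */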
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.frag_len = 0;
	skb_cb->htt.pad_len = 0;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}

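/* Transmit a data frame using HTT_H2T_MSG_TYPE_TX_FRM. Depending on the
 * target HTT version the frame is either described by a two-entry fragment
 * list pushed in front of the MSDU (use_frags) or, for management frames on
 * HTT >= 3.0, referenced by its frame pointer directly.
 */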
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct htt_cmd *cmd;
	struct htt_data_tx_desc_frag *tx_frags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *txdesc = NULL;
	bool use_frags;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
	u8 tid;
	int prefetch_len, desc_len;
	int msdu_id = -1;
	int res;
	u8 flags0;
	u16 flags1;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

	txdesc = ath10k_htc_alloc_skb(desc_len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	/* Since HTT 3.0 there is no separate mgmt tx command. However, in case
	 * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a
	 * tx fragment list the host driver specifies the frame pointer
	 * directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
		ath10k_warn("htt alignment check failed. dropping packet.\n");
		res = -EIO;
		goto err_free_txdesc;
	}

	if (use_frags) {
		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
		skb_cb->htt.pad_len = (unsigned long)msdu->data -
				      round_down((unsigned long)msdu->data, 4);

		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
	} else {
		skb_cb->htt.frag_len = 0;
		skb_cb->htt.pad_len = 0;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_pull_txfrag;

	if (use_frags) {
		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
					DMA_TO_DEVICE);

		/* tx fragment list must be terminated with zero-entry */
		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
						  skb_cb->htt.frag_len +
						  skb_cb->htt.pad_len);
		tx_frags[0].len = __cpu_to_le32(msdu->len -
						skb_cb->htt.frag_len -
						skb_cb->htt.pad_len);
		tx_frags[1].paddr = __cpu_to_le32(0);
		tx_frags[1].len = __cpu_to_le32(0);

		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
					   DMA_TO_DEVICE);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "tx-msdu 0x%llx\n",
		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "tx-msdu: ",
			msdu->data, msdu->len);

	skb_put(txdesc, desc_len);
	cmd = (struct htt_cmd *)txdesc->data;

	tid = ATH10K_SKB_CB(msdu)->htt.tid;

	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

	flags0 = 0;
	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	if (use_frags)
		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
	else
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

	flags1 = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	cmd->data_tx.flags0 = flags0;
	cmd->data_tx.flags1 = __cpu_to_le16(flags1);
	cmd->data_tx.len = __cpu_to_le16(msdu->len -
					 skb_cb->htt.frag_len -
					 skb_cb->htt.pad_len);
	cmd->data_tx.id = __cpu_to_le16(msdu_id);
	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
	cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_pull_txfrag:
	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}