/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

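/* The pending tx counters below bound how many frames may be queued
 * towards the firmware at once; mac80211 queues are paused while the
 * limit is hit. The double-underscore variant must be called with
 * htt->tx_lock held.
 */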
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

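/* Allocate a free msdu id and bind it to the skb via the pending_tx
 * idr. Returns the id (>= 0) on success or a negative errno. GFP_ATOMIC
 * is required since the caller holds htt->tx_lock.
 */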
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

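/* Set up tx state for the HTT layer: the msdu id idr, a DMA pool for
 * per-frame HTT tx descriptors and, on hardware with
 * continuous_frag_desc, a contiguous bank of fragment descriptors
 * shared with the firmware.
 */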
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}

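/* idr_for_each() callback used on teardown: force a "discard" tx
 * completion for every msdu id still pending so the associated skbs
 * are unmapped and freed.
 */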
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);

	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

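/* Request firmware statistics. The 64-bit cookie is echoed back in the
 * resulting stats indication so replies can be matched to requests.
 */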
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support at most 8-bit masks, so there is no need
	 * to worry about endianness */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

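/* Tell the firmware where the host-allocated bank of fragment
 * descriptors lives so it can fetch a frame's fragment descriptor
 * directly by msdu id.
 */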
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int ret, size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
	cmd->frag_desc_bank_cfg.info = 0;
	cmd->frag_desc_bank_cfg.num_banks = 1;
	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
				__cpu_to_le32(htt->frag_desc.paddr);
	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
				__cpu_to_le16(htt->max_num_pending_tx - 1);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

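/* Push the host rx ring layout (base address, ring size, buffer size
 * and the per-field offsets within struct htt_rx_desc) to the firmware.
 */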
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

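/* Configure the maximum number of subframes the firmware may aggregate
 * into a single A-MPDU/A-MSDU. Zero is rejected rather than treated as
 * "use the firmware default".
 */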
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

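/* Transmit path for management frames. The frame is DMA-mapped in its
 * entirety while up to HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of its
 * header are also copied into the HTT_H2T_MSG_TYPE_MGMT_TX command.
 */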
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}

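/* Data transmit path. Builds the HTT tx descriptor and fragment list,
 * then hands descriptor and payload to HIF as a two-element
 * scatter-gather list, bypassing HTC (see the comment further down).
 */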
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr = 0;
	u32 frags_paddr = 0;
	struct htt_msdu_ext_desc *ext_desc = NULL;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

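	/* Leave tail room for the CCMP MIC on protected robust management
	 * frames and on raw-mode frames using hardware crypto, so the
	 * hardware/firmware has space to append it.
	 */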
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!skb_cb->htt.nohwcrypt &&
		   skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txbuf;

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is
	 * ignored by setting NULL as the transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through the HTC tx
	 * path as it's a waste of resources. By bypassing HTC it is possible
	 * to avoid extra memory allocations, compress data structures and
	 * thus improve performance.
	 */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (skb_cb->htt.nohwcrypt)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}