/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

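/* Pending-tx accounting: num_pending_tx counts frames handed to the firmware
 * but not yet completed. The __ variant below requires htt->tx_lock to be
 * held; dropping back below the limit re-enables mac80211 queues that were
 * paused because the firmware tx queue was full.
 */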
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

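/* Reserve a pending-tx slot. Returns -EBUSY once the firmware tx queue is
 * full; reaching the limit exactly pauses all mac80211 queues until a tx
 * completion frees a slot again.
 */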
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

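/* Allocate a firmware msdu_id for @skb from the pending_tx IDR. The id
 * identifies the frame in tx completion events and also serves as the index
 * into the fragment descriptor array. Callers must hold htt->tx_lock, e.g.:
 *
 *	spin_lock_bh(&htt->tx_lock);
 *	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 *	spin_unlock_bh(&htt->tx_lock);
 *
 * A negative return value is the errno from idr_alloc().
 */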
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

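/* One-time tx-side setup: initialize the msdu_id IDR, create a DMA pool of
 * per-frame HTT tx buffers and, on hardware with continuous_frag_desc set,
 * allocate a coherent array of MSDU extension descriptors indexed by msdu_id.
 */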
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}

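/* idr_for_each() callback used during teardown: report every still-pending
 * msdu as discarded so ath10k_txrx_tx_unref() can unmap and free it.
 */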
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	spin_lock_bh(&htt->tx_lock);
	ath10k_txrx_tx_unref(htt, &tx_done);
	spin_unlock_bh(&htt->tx_lock);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);

	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

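/* Request the firmware's HTT interface version. The reply comes back
 * asynchronously as a target-to-host version-conf event on the htt rx side.
 */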
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

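/* Describe the host-resident fragment descriptor bank to the firmware. A
 * single bank covering all msdu_ids is configured, so the firmware can fetch
 * the scatter-gather entries for msdu_id N at bank_base + N * desc_size.
 */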
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int ret, size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
	cmd->frag_desc_bank_cfg.info = 0;
	cmd->frag_desc_bank_cfg.num_banks = 1;
	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
		__cpu_to_le32(htt->frag_desc.paddr);
	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
		__cpu_to_le16(htt->max_num_pending_tx - 1);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

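/* Configure the rx ring for low-latency firmware: one ring, its DMA
 * addresses, buffer size, and the offsets (in 4-byte words) of each section
 * within struct htt_rx_desc so host and firmware agree on the layout.
 */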
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

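/* Cap the number of subframes the firmware may aggregate. Values outside the
 * supported ranges (1-64 A-MPDU subframes, 1-31 A-MSDU subframes) are
 * rejected with -EINVAL rather than silently clamped.
 */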
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

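/* Management frame tx: the frame is DMA-mapped and described to the firmware
 * with an HTT_H2T_MSG_TYPE_MGMT_TX command carrying its physical address,
 * length, msdu_id and vdev_id; the first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes
 * are also copied inline. The error labels unwind in reverse order of
 * acquisition.
 */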
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}

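/* Data tx. In outline:
 *
 *  1. reserve a pending-tx slot and an msdu_id
 *  2. take a struct ath10k_htt_txbuf from the DMA pool (HTC header, HTT
 *     command and fragment list in one coherent allocation)
 *  3. DMA-map the frame and fill the fragment list, either in the
 *     per-msdu_id extension descriptor (continuous_frag_desc hardware) or
 *     in the txbuf itself
 *  4. hand both pieces to HIF as a two-element scatter-gather list,
 *     bypassing HTC entirely (see the comment block within)
 *
 * The HTT tx-done event later releases the msdu_id, the txbuf and the
 * pending-tx slot.
 */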
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr = 0;
	u32 frags_paddr = 0;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control))
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txbuf;

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
			frags[1].tword_addr.paddr_lo = 0;
			frags[1].tword_addr.paddr_hi = 0;

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is
	 * ignored by setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through the HTC tx
	 * path as it's a waste of resources. By bypassing HTC it is possible
	 * to avoid extra memory allocations, compress data structures and
	 * thus improve performance.
	 */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}