/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

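/* Pending-tx accounting doubles as flow control: once num_pending_tx
 * reaches max_num_pending_tx the mac80211 queues are stopped, and they are
 * woken again as soon as one slot frees up. The __ variant below expects
 * htt->tx_lock to be held by the caller.
 */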
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ieee80211_stop_queues(htt->ar->hw);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

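/* MSDU ids correlate outgoing frames with their tx completions. They are
 * allocated from the used_msdu_ids bitmap; both helpers below must be
 * called with htt->tx_lock held.
 */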
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int msdu_id;

	lockdep_assert_held(&htt->tx_lock);

	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
				      htt->max_num_pending_tx);
	if (msdu_id == htt->max_num_pending_tx)
		return -ENOBUFS;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
	__set_bit(msdu_id, htt->used_msdu_ids);
	return msdu_id;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!test_bit(msdu_id, htt->used_msdu_ids))
		ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
			    msdu_id);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
	__clear_bit(msdu_id, htt->used_msdu_ids);
}

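/* Allocate the per-device tx state: the pending tx lookup table, the msdu
 * id bitmap and a DMA pool for the HTT tx descriptors handed to the
 * firmware.
 */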
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	spin_lock_init(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
				  htt->max_num_pending_tx, GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
				     BITS_TO_LONGS(htt->max_num_pending_tx),
				     GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		kfree(htt->used_msdu_ids);
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}

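/* Force-complete any msdus still outstanding at teardown time so their
 * buffers get unmapped and freed. Completions are faked with the discard
 * flag set.
 */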
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	spin_lock_bh(&htt->tx_lock);
	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
	spin_unlock_bh(&htt->tx_lock);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_free_pending(htt);
	kfree(htt->pending_tx);
	kfree(htt->used_msdu_ids);
	dma_pool_destroy(htt->tx_pool);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

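/* Send a version request to the firmware. The reply does not come back
 * synchronously; it is delivered later as a separate HTT event via the
 * rx path.
 */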
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

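/* Ask the firmware to upload (and reset) the statistics types selected by
 * @mask; @cookie is carried in the request so the caller can match replies
 * to requests.
 */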
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only 8-bit masks at most, so there is no
	 * need to worry about endianness */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

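/* Describe the rx ring layout to the firmware: base address, ring length,
 * buffer size, the rx descriptor field offsets and which rx flavours
 * should be delivered to the host.
 */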
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

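/* Configure the maximum number of subframes the firmware may aggregate
 * into an A-MPDU or A-MSDU. Values outside the supported ranges are
 * rejected with -EINVAL.
 */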
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

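/* Transmit a management frame using the dedicated HTT mgmt tx command.
 * The frame is DMA-mapped and handed to the firmware together with a
 * freshly allocated msdu id; all resources are rolled back on error.
 */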
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}

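/* Data tx path. The frame is described by an ath10k_htt_txbuf (HTC
 * header, HTT command and fragment list) allocated from the DMA pool and
 * is pushed to the device as a two-element scatter-gather list, bypassing
 * the regular HTC tx path.
 */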
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr;
	u32 frags_paddr;
	bool use_frags;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	/* Since HTT 3.0 there is no separate mgmt tx command. However, in
	 * case of mgmt tx using TX_FRM there is no tx fragment list. Instead
	 * of a tx fragment list the host driver passes the frame pointer
	 * directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txbuf;

	if (likely(use_frags)) {
		frags = skb_cb->htt.txbuf->frags;

		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
		frags[0].len = __cpu_to_le32(msdu->len);
		frags[1].paddr = 0;
		frags[1].len = 0;

		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->htt.txbuf_paddr;
	} else {
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->paddr;
	}

	/* Normally all commands go through HTC, which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * The HTT endpoint is creditless so there's no need to care about
	 * HTC flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is
	 * ignored by setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through the HTC tx
	 * path as it's a waste of resources. By bypassing HTC it is possible
	 * to avoid extra memory allocations, compress data structures and
	 * thus improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply relay a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}