/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

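/* Caller must hold htt->tx_lock. Frees one pending tx slot and wakes the
 * mac80211 queues once the pending count drops back below the limit.
 */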
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ieee80211_wake_queues(htt->ar->hw);
}

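/* Locked wrapper around __ath10k_htt_tx_dec_pending(). */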
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

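/* Reserves one pending tx slot. Stops the mac80211 queues when the limit is
 * reached and returns -EBUSY once no more slots are available.
 */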
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ieee80211_stop_queues(htt->ar->hw);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

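/* Allocates a tx MSDU id for @skb from the pending_tx IDR. Caller must hold
 * htt->tx_lock. Returns the new id (0..0xffff) or a negative errno.
 */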
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

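/* Returns a previously allocated MSDU id to the pool. Caller must hold
 * htt->tx_lock.
 */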
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

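/* Sets up HTT tx state: the tx lock, the pending_tx IDR and the DMA pool
 * used for per-frame HTT tx descriptors.
 */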
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		idr_destroy(&htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}

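/* idr_for_each() callback used at teardown: force-completes any still
 * pending MSDU with discard status so its resources are unreferenced.
 */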
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	spin_lock_bh(&htt->tx_lock);
	ath10k_txrx_tx_unref(htt, &tx_done);
	spin_unlock_bh(&htt->tx_lock);

	return 0;
}

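/* Tears down HTT tx state; cleans up anything still pending first. */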
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);
}

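/* HTC tx completion handler for HTT command buffers; nothing to do beyond
 * freeing the skb.
 */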
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

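/* Sends an HTT_H2T_MSG_TYPE_VERSION_REQ command asking firmware to report
 * its HTT protocol version.
 */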
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

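/* Requests (and resets) the firmware HTT stats selected by @mask; results
 * arrive asynchronously and are matched back to the caller via @cookie.
 */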
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

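/* Tells firmware where the host rx ring lives and which rx descriptor
 * sections to deliver, for the LL (low latency) rx path.
 */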
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

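/* Overrides the firmware A-MPDU/A-MSDU aggregation subframe limits; values
 * outside the supported ranges are rejected with -EINVAL.
 */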
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

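/* Transmits a management frame via the dedicated HTT mgmt tx command. The
 * frame is DMA-mapped and referenced by physical address; only the first
 * HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes are copied into the command itself.
 */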
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}

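/* Data path tx. Builds the HTT tx descriptor (HTC header + HTT command) in
 * a DMA-pool buffer and hands descriptor and payload to HIF as a
 * scatter-gather pair, bypassing the HTC tx path entirely.
 */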
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr = 0;
	u32 frags_paddr = 0;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control))
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txbuf;

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		frags = skb_cb->htt.txbuf->frags;

		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
		frags[0].len = __cpu_to_le32(msdu->len);
		frags[1].paddr = 0;
		frags[1].len = 0;

		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->htt.txbuf_paddr;
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}