/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

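/* Caller must hold htt->tx_lock. Drops the pending tx counts and unpauses
 * queues that were stopped with ATH10K_TX_PAUSE_Q_FULL as soon as the
 * first slot frees up.
 */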
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
{
        if (limit_mgmt_desc)
                htt->num_pending_mgmt_tx--;

        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
                                      bool limit_mgmt_desc)
{
        spin_lock_bh(&htt->tx_lock);
        __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
        spin_unlock_bh(&htt->tx_lock);
}

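/* Reserve a tx slot before a frame is handed to the firmware. Fails with
 * -EBUSY when all tx descriptors are in use, or when management descriptors
 * are being limited and the probe response threshold has been crossed, so
 * the caller can drop or defer the frame.
 */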
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
                                     bool limit_mgmt_desc, bool is_probe_resp)
{
        struct ath10k *ar = htt->ar;
        int ret = 0;

        spin_lock_bh(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx) {
                ret = -EBUSY;
                goto exit;
        }

        if (limit_mgmt_desc) {
                if (is_probe_resp && (htt->num_pending_mgmt_tx >
                    ar->hw_params.max_probe_resp_desc_thres)) {
                        ret = -EBUSY;
                        goto exit;
                }
                htt->num_pending_mgmt_tx++;
        }

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
        spin_unlock_bh(&htt->tx_lock);
        return ret;
}

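/* msdu ids come from an idr so an in-flight frame can be looked up again
 * when the firmware reports its completion; the same id is carried in the
 * HTT tx descriptor (cmd_tx.id / mgmt_tx.desc_id) below.
 */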
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
        struct ath10k *ar = htt->ar;
        int ret;

        lockdep_assert_held(&htt->tx_lock);

        ret = idr_alloc(&htt->pending_tx, skb, 0,
                        htt->max_num_pending_tx, GFP_ATOMIC);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

        return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

        idr_remove(&htt->pending_tx, msdu_id);
}

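/* Allocate the per-msdu tx descriptors: one ath10k_htt_txbuf (HTC and HTT
 * headers plus an inline fragment list) per possible msdu id, carved out
 * of a single coherent DMA block, and, for firmware with continuous
 * fragment descriptor support, a parallel bank of extension descriptors.
 */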
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret, size;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);

        size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
        htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
                                              &htt->txbuf.paddr,
                                              GFP_DMA);
        if (!htt->txbuf.vaddr) {
                ath10k_err(ar, "failed to alloc tx buffer\n");
                ret = -ENOMEM;
                goto free_idr_pending_tx;
        }

        if (!ar->hw_params.continuous_frag_desc)
                goto skip_frag_desc_alloc;

        size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
        htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
                                                  &htt->frag_desc.paddr,
                                                  GFP_DMA);
        if (!htt->frag_desc.vaddr) {
                ath10k_warn(ar, "failed to alloc fragment desc memory\n");
                ret = -ENOMEM;
                goto free_txbuf;
        }

skip_frag_desc_alloc:
        return 0;

free_txbuf:
        size = htt->max_num_pending_tx *
               sizeof(struct ath10k_htt_txbuf);
        dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
                          htt->txbuf.paddr);
free_idr_pending_tx:
        idr_destroy(&htt->pending_tx);
        return ret;
}

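/* idr_for_each() callback used on teardown: complete every msdu still in
 * flight as discarded so its skb and msdu id get reclaimed.
 */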
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
        struct ath10k *ar = ctx;
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};

        ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

        tx_done.discard = 1;
        tx_done.msdu_id = msdu_id;

        ath10k_txrx_tx_unref(htt, &tx_done);

        return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
        int size;

        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);

        if (htt->txbuf.vaddr) {
                size = htt->max_num_pending_tx *
                       sizeof(struct ath10k_htt_txbuf);
                dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
                                  htt->txbuf.paddr);
        }

        if (htt->frag_desc.vaddr) {
                size = htt->max_num_pending_tx *
                       sizeof(struct htt_msdu_ext_desc);
                dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
                                  htt->frag_desc.paddr);
        }
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

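/* Query the firmware's HTT interface version. The reply arrives as an HTT
 * event and is handled by the rx event code, outside this file.
 */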
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 8 bit masks so no need to worry
         * about endian support */
        req->upload_types[0] = mask;
        req->reset_types[0] = mask;
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

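/* Tell the firmware where the host-resident fragment descriptor bank
 * lives so it can index extension descriptors directly by msdu id (bank
 * ids run from 0 to max_num_pending_tx - 1).
 */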
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int ret, size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
        cmd->frag_desc_bank_cfg.info = 0;
        cmd->frag_desc_bank_cfg.num_banks = 1;
        cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
        cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
                __cpu_to_le32(htt->frag_desc.paddr);
        cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
        cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
                __cpu_to_le16(htt->max_num_pending_tx - 1);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

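/* Describe the rx ring layout to the firmware. The offsets below are
 * expressed in 4-byte words (see desc_offset()), hence the alignment
 * checks on the rx buffer size.
 */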
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

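/* Override the firmware's A-MPDU/A-MSDU subframe limits; out-of-range
 * values are rejected before anything is sent.
 */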
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

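/* Management tx path: the frame is DMA mapped and its physical address is
 * passed to the firmware in an HTT_H2T_MSG_TYPE_MGMT_TX command, with up
 * to HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the header copied inline.
 */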
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        bool limit_mgmt_desc = false;
        bool is_probe_resp = false;

        if (ar->hw_params.max_probe_resp_desc_thres) {
                limit_mgmt_desc = true;

                if (ieee80211_is_probe_resp(hdr->frame_control))
                        is_probe_resp = true;
        }

        res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);

        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
                goto err_tx_dec;

        msdu_id = res;

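        /* Protected robust management frames get a CCMP MIC appended by
         * the hardware/firmware, so make room for it here to keep the
         * length reported to the firmware in sync (rationale inferred from
         * the MIC length reservation, not documented in this file).
         */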
        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, len);

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.txbuf = NULL;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
        return res;
}

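/* Data tx path: the prebuilt per-msdu txbuf (HTC/HTT headers plus the
 * fragment list) and the frame itself go out as two scatter-gather items
 * straight to HIF, bypassing HTC; see the comment in the function body.
 */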
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
                  struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
        struct htt_data_tx_desc_frag *frags;
        u8 vdev_id = skb_cb->vdev_id;
        u8 tid = skb_cb->htt.tid;
        int prefetch_len;
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
        u32 frags_paddr = 0;
        struct htt_msdu_ext_desc *ext_desc = NULL;
        bool limit_mgmt_desc = false;
        bool is_probe_resp = false;

        if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
            ar->hw_params.max_probe_resp_desc_thres) {
                limit_mgmt_desc = true;

                if (ieee80211_is_probe_resp(hdr->frame_control))
                        is_probe_resp = true;
        }

        res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
        if (res)
                goto err;

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
                goto err_tx_dec;

        msdu_id = res;

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
        skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
                (sizeof(struct ath10k_htt_txbuf) * msdu_id);

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
            ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        } else if (!skb_cb->htt.nohwcrypt &&
                   txmode == ATH10K_HW_TXRX_RAW &&
                   ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_msdu_id;
        }

        switch (txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                /* fall through */
        case ATH10K_HW_TXRX_ETHERNET:
                if (ar->hw_params.continuous_frag_desc) {
                        memset(&htt->frag_desc.vaddr[msdu_id], 0,
                               sizeof(struct htt_msdu_ext_desc));
                        frags = (struct htt_data_tx_desc_frag *)
                                &htt->frag_desc.vaddr[msdu_id].frags;
                        ext_desc = &htt->frag_desc.vaddr[msdu_id];
                        frags[0].tword_addr.paddr_lo =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].tword_addr.paddr_hi = 0;
                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

                        frags_paddr = htt->frag_desc.paddr +
                                (sizeof(struct htt_msdu_ext_desc) * msdu_id);
                } else {
                        frags = skb_cb->htt.txbuf->frags;
                        frags[0].dword_addr.paddr =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
                        frags[1].dword_addr.paddr = 0;
                        frags[1].dword_addr.len = 0;

                        frags_paddr = skb_cb->htt.txbuf_paddr;
                }
                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

                frags_paddr = skb_cb->paddr;
                break;
        }

        /* Normally all commands go through HTC which manages tx credits for
         * each endpoint and notifies when tx is completed.
         *
         * HTT endpoint is creditless so there's no need to care about HTC
         * flags. In that case it is trivial to fill the HTC header here.
         *
         * MSDU transmission is considered completed upon HTT event. This
         * implies no relevant resources can be freed until after the event is
         * received. That's why HTC tx completion handler itself is ignored by
         * setting NULL to transfer_context for all sg items.
         *
         * There is simply no point in pushing HTT TX_FRM through HTC tx path
         * as it's a waste of resources. By bypassing HTC it is possible to
         * avoid extra memory allocations, compress data structures and thus
         * improve performance. */

        skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
        skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
                        sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                        sizeof(skb_cb->htt.txbuf->cmd_tx) +
                        prefetch_len);
        skb_cb->htt.txbuf->htc_hdr.flags = 0;

        if (skb_cb->htt.nohwcrypt)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
                if (ar->hw_params.continuous_frag_desc)
                        ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
        }

        /* Prevent firmware from sending up tx inspection requests. There's
         * nothing ath10k can do with frames requested for inspection so force
         * it to simply relay a regular tx completion with discard status.
         */
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

        skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
        skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
        skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
        skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
        skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        if (ath10k_mac_tx_frm_has_freq(ar)) {
                skb_cb->htt.txbuf->cmd_tx.offchan_tx.peerid =
                                __cpu_to_le16(HTT_INVALID_PEERID);
                skb_cb->htt.txbuf->cmd_tx.offchan_tx.freq =
                                __cpu_to_le16(skb_cb->htt.freq);
        } else {
                skb_cb->htt.txbuf->cmd_tx.peerid =
                                __cpu_to_le32(HTT_INVALID_PEERID);
        }

        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
                   flags0, flags1, msdu->len, msdu_id, frags_paddr,
                   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
                        msdu->data, msdu->len);
        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

        sg_items[0].transfer_id = 0;
        sg_items[0].transfer_context = NULL;
        sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
        sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
                            sizeof(skb_cb->htt.txbuf->frags);
        sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_tx);

        sg_items[1].transfer_id = 0;
        sg_items[1].transfer_context = NULL;
        sg_items[1].vaddr = msdu->data;
        sg_items[1].paddr = skb_cb->paddr;
        sg_items[1].len = prefetch_len;

        res = ath10k_hif_tx_sg(htt->ar,
                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
                               sg_items, ARRAY_SIZE(sg_items));
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
        return res;
}