Kalle Valo5e3dd152013-06-12 20:52:10 +03001/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _HTT_H_
19#define _HTT_H_
20
21#include <linux/bug.h>
Michal Kazior6e712d42013-09-24 10:18:36 +020022#include <linux/interrupt.h>
Michal Kaziora16942e2014-02-27 18:50:04 +020023#include <linux/dmapool.h>
Michal Kaziorc5450702015-01-24 12:14:48 +020024#include <linux/hashtable.h>
Janusz Dziedzic8f739db2014-03-24 21:23:17 +010025#include <net/mac80211.h>
Kalle Valo5e3dd152013-06-12 20:52:10 +030026
Kalle Valo5e3dd152013-06-12 20:52:10 +030027#include "htc.h"
28#include "rx_desc.h"
29
Kalle Valo5e3dd152013-06-12 20:52:10 +030030enum htt_dbg_stats_type {
31 HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
32 HTT_DBG_STATS_RX_REORDER = 1 << 1,
33 HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
34 HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
35 HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
36 /* bits 5-23 currently reserved */
37
38 HTT_DBG_NUM_STATS /* keep this last */
39};
40
41enum htt_h2t_msg_type { /* host-to-target */
42 HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
43 HTT_H2T_MSG_TYPE_TX_FRM = 1,
44 HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
45 HTT_H2T_MSG_TYPE_STATS_REQ = 3,
46 HTT_H2T_MSG_TYPE_SYNC = 4,
47 HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
48 HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
Michal Kazior961d4c32013-08-09 10:13:34 +020049
50 /* This command is used for sending management frames in HTT < 3.0.
51 * HTT >= 3.0 uses TX_FRM for everything. */
Kalle Valo5e3dd152013-06-12 20:52:10 +030052 HTT_H2T_MSG_TYPE_MGMT_TX = 7,
53
54 HTT_H2T_NUM_MSGS /* keep this last */
55};
56
57struct htt_cmd_hdr {
58 u8 msg_type;
59} __packed;
60
61struct htt_ver_req {
62 u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
63} __packed;
64
65/*
66 * HTT tx MSDU descriptor
67 *
68 * The HTT tx MSDU descriptor is created by the host HTT SW for each
69 * tx MSDU. The HTT tx MSDU descriptor contains the information that
70 * the target firmware needs for the FW's tx processing, particularly
71 * for creating the HW msdu descriptor.
72 * The same HTT tx descriptor is used for HL and LL systems, though
73 * a few fields within the tx descriptor are used only by LL or
74 * only by HL.
75 * The HTT tx descriptor is defined in two manners: by a struct with
76 * bitfields, and by a series of [dword offset, bit mask, bit shift]
77 * definitions.
 78 * The target should use the struct def, for simplicity and clarity,
 79 * but the host shall use the bit-mask + bit-shift defs, to be endian-
80 * neutral. Specifically, the host shall use the get/set macros built
81 * around the mask + shift defs.
82 */
83struct htt_data_tx_desc_frag {
84 __le32 paddr;
85 __le32 len;
86} __packed;
87
88enum htt_data_tx_desc_flags0 {
89 HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
90 HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
91 HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
92 HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
93 HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
94#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
95#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
96};
97
98enum htt_data_tx_desc_flags1 {
99#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
100#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
101#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
102#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
103#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
104#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
105 HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
106 HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
107 HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
108 HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
109 HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
110};
111
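/*
 * Illustrative sketch of the endian-neutral get/set approach described in
 * the comment above struct htt_data_tx_desc_frag: fields are updated using
 * the _MASK/_LSB definitions rather than bitfields. The helper name below
 * is a hypothetical example, not part of the HTT interface.
 */
static inline __le16 htt_data_tx_flags1_set_ext_tid(__le16 flags1, u8 ext_tid)
{
	u16 val = __le16_to_cpu(flags1);

	val &= ~HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK;
	val |= (ext_tid << HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB) &
	       HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK;

	return __cpu_to_le16(val);
}
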
112enum htt_data_tx_ext_tid {
113 HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
114 HTT_DATA_TX_EXT_TID_MGMT = 17,
115 HTT_DATA_TX_EXT_TID_INVALID = 31
116};
117
118#define HTT_INVALID_PEERID 0xFFFF
119
120/*
121 * htt_data_tx_desc - used for data tx path
122 *
123 * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
124 * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
125 * for special kinds of tids
126 * postponed: only for HL hosts. indicates if this is a resend
 127 * (HL hosts manage queues on the host)
128 * more_in_batch: only for HL hosts. indicates if more packets are
129 * pending. this allows target to wait and aggregate
Michal Kazior8d6d3622014-11-24 14:58:31 +0100130 * freq: 0 means home channel of given vdev. intended for offchannel
Kalle Valo5e3dd152013-06-12 20:52:10 +0300131 */
132struct htt_data_tx_desc {
133 u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
134 __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
135 __le16 len;
136 __le16 id;
137 __le32 frags_paddr;
Michal Kazior8d6d3622014-11-24 14:58:31 +0100138 __le16 peerid;
139 __le16 freq;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300140 u8 prefetch[0]; /* start of frame, for FW classification engine */
141} __packed;
142
143enum htt_rx_ring_flags {
144 HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
145 HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
146 HTT_RX_RING_FLAGS_PPDU_START = 1 << 2,
147 HTT_RX_RING_FLAGS_PPDU_END = 1 << 3,
148 HTT_RX_RING_FLAGS_MPDU_START = 1 << 4,
149 HTT_RX_RING_FLAGS_MPDU_END = 1 << 5,
150 HTT_RX_RING_FLAGS_MSDU_START = 1 << 6,
151 HTT_RX_RING_FLAGS_MSDU_END = 1 << 7,
152 HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
153 HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9,
154 HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10,
155 HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
156 HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12,
157 HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13,
158 HTT_RX_RING_FLAGS_NULL_RX = 1 << 14,
159 HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
160};
161
Michal Kaziorfe2407a2014-11-27 11:12:43 +0100162#define HTT_RX_RING_SIZE_MIN 128
163#define HTT_RX_RING_SIZE_MAX 2048
164
Kalle Valo5e3dd152013-06-12 20:52:10 +0300165struct htt_rx_ring_setup_ring {
166 __le32 fw_idx_shadow_reg_paddr;
167 __le32 rx_ring_base_paddr;
168 __le16 rx_ring_len; /* in 4-byte words */
169 __le16 rx_ring_bufsize; /* rx skb size - in bytes */
170 __le16 flags; /* %HTT_RX_RING_FLAGS_ */
171 __le16 fw_idx_init_val;
172
173 /* the following offsets are in 4-byte units */
174 __le16 mac80211_hdr_offset;
175 __le16 msdu_payload_offset;
176 __le16 ppdu_start_offset;
177 __le16 ppdu_end_offset;
178 __le16 mpdu_start_offset;
179 __le16 mpdu_end_offset;
180 __le16 msdu_start_offset;
181 __le16 msdu_end_offset;
182 __le16 rx_attention_offset;
183 __le16 frag_info_offset;
184} __packed;
185
186struct htt_rx_ring_setup_hdr {
187 u8 num_rings; /* supported values: 1, 2 */
188 __le16 rsvd0;
189} __packed;
190
191struct htt_rx_ring_setup {
192 struct htt_rx_ring_setup_hdr hdr;
193 struct htt_rx_ring_setup_ring rings[0];
194} __packed;
195
196/*
197 * htt_stats_req - request target to send specified statistics
198 *
199 * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
 200 * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
 201 * so make sure it is little-endian.
 202 * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
 203 * so make sure it is little-endian.
204 * @cfg_val: stat_type specific configuration
205 * @stat_type: see %htt_dbg_stats_type
206 * @cookie_lsb: used for confirmation message from target->host
207 * @cookie_msb: ditto as %cookie
208 */
209struct htt_stats_req {
210 u8 upload_types[3];
211 u8 rsvd0;
212 u8 reset_types[3];
213 struct {
214 u8 mpdu_bytes;
215 u8 mpdu_num_msdus;
216 u8 msdu_bytes;
217 } __packed;
218 u8 stat_type;
219 __le32 cookie_lsb;
220 __le32 cookie_msb;
221} __packed;
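
/*
 * Sketch of packing a 24-bit %htt_dbg_stats_type mask into the 3-byte
 * little-endian upload_types/reset_types fields, as required by the comment
 * above. The helper name and the use of a plain u32 mask are illustrative
 * assumptions, not part of the HTT interface.
 */
static inline void htt_stats_req_pack_types(u8 dst[3], u32 type_mask)
{
	dst[0] = (type_mask >> 0) & 0xff;
	dst[1] = (type_mask >> 8) & 0xff;
	dst[2] = (type_mask >> 16) & 0xff;
}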
222
223#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
224
225/*
226 * htt_oob_sync_req - request out-of-band sync
227 *
228 * The HTT SYNC tells the target to suspend processing of subsequent
229 * HTT host-to-target messages until some other target agent locally
230 * informs the target HTT FW that the current sync counter is equal to
231 * or greater than (in a modulo sense) the sync counter specified in
232 * the SYNC message.
233 *
234 * This allows other host-target components to synchronize their operation
235 * with HTT, e.g. to ensure that tx frames don't get transmitted until a
236 * security key has been downloaded to and activated by the target.
237 * In the absence of any explicit synchronization counter value
238 * specification, the target HTT FW will use zero as the default current
239 * sync value.
240 *
241 * The HTT target FW will suspend its host->target message processing as long
242 * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
243 */
244struct htt_oob_sync_req {
245 u8 sync_count;
246 __le16 rsvd0;
247} __packed;
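
/*
 * Sketch of the modulo-sense comparison described above: the target keeps
 * host->target message processing suspended while the in-band counter is
 * ahead of the out-of-band counter by 1..127 (mod 256). The helper name is
 * illustrative only.
 */
static inline bool htt_oob_sync_is_suspended(u8 in_band_count, u8 oob_count)
{
	u8 delta = (u8)(in_band_count - oob_count);

	return delta > 0 && delta < 128;
}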
248
Kalle Valo5e3dd152013-06-12 20:52:10 +0300249struct htt_aggr_conf {
250 u8 max_num_ampdu_subframes;
Janusz Dziedzicd3856232014-06-02 21:19:46 +0300251 /* amsdu_subframes is limited by 0x1F mask */
252 u8 max_num_amsdu_subframes;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300253} __packed;
254
255#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
256
257struct htt_mgmt_tx_desc {
258 u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
259 __le32 msdu_paddr;
260 __le32 desc_id;
261 __le32 len;
262 __le32 vdev_id;
263 u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
264} __packed;
265
266enum htt_mgmt_tx_status {
267 HTT_MGMT_TX_STATUS_OK = 0,
268 HTT_MGMT_TX_STATUS_RETRY = 1,
269 HTT_MGMT_TX_STATUS_DROP = 2
270};
271
272/*=== target -> host messages ===============================================*/
273
Kalle Valo5e3dd152013-06-12 20:52:10 +0300274enum htt_t2h_msg_type {
275 HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
276 HTT_T2H_MSG_TYPE_RX_IND = 0x1,
277 HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
278 HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
279 HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
280 HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
281 HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
282 HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
283 HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
284 HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
285 HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
286 HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
287 HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
288 HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
289 HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
Michal Kaziorc5450702015-01-24 12:14:48 +0200290 HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
291 HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
292 HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
293 HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
 294 /* 0x13 reserved */
295 HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
296
297 /* FIXME: Do not depend on this event id. Numbering of this event id is
298 * broken across different firmware revisions and HTT version fails to
299 * indicate this.
300 */
Kalle Valo5e3dd152013-06-12 20:52:10 +0300301 HTT_T2H_MSG_TYPE_TEST,
Michal Kaziorc5450702015-01-24 12:14:48 +0200302
Kalle Valo5e3dd152013-06-12 20:52:10 +0300303 /* keep this last */
304 HTT_T2H_NUM_MSGS
305};
306
307/*
308 * htt_resp_hdr - header for target-to-host messages
309 *
310 * msg_type: see htt_t2h_msg_type
311 */
312struct htt_resp_hdr {
313 u8 msg_type;
314} __packed;
315
316#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
317#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff
318#define HTT_RESP_HDR_MSG_TYPE_LSB 0
319
320/* htt_ver_resp - response sent for htt_ver_req */
321struct htt_ver_resp {
322 u8 minor;
323 u8 major;
324 u8 rsvd0;
325} __packed;
326
327struct htt_mgmt_tx_completion {
328 u8 rsvd0;
329 u8 rsvd1;
330 u8 rsvd2;
331 __le32 desc_id;
332 __le32 status;
333} __packed;
334
335#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F)
336#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
337#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6)
338#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
339
340#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
341#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
342#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0
343#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6
344#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
345#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12
346#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000
347#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18
348#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
349#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
350
351struct htt_rx_indication_hdr {
352 u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
353 __le16 peer_id;
354 __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
355} __packed;
356
357#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0)
358#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
359#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1)
360#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5)
361#define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6)
362#define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7)
363
364#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF
365#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0
366#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
367#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24
368
369#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
370#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB 0
371#define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000
372#define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24
373
374enum htt_rx_legacy_rate {
375 HTT_RX_OFDM_48 = 0,
376 HTT_RX_OFDM_24 = 1,
377 HTT_RX_OFDM_12,
378 HTT_RX_OFDM_6,
379 HTT_RX_OFDM_54,
380 HTT_RX_OFDM_36,
381 HTT_RX_OFDM_18,
382 HTT_RX_OFDM_9,
383
384 /* long preamble */
385 HTT_RX_CCK_11_LP = 0,
386 HTT_RX_CCK_5_5_LP = 1,
387 HTT_RX_CCK_2_LP,
388 HTT_RX_CCK_1_LP,
389 /* short preamble */
390 HTT_RX_CCK_11_SP,
391 HTT_RX_CCK_5_5_SP,
392 HTT_RX_CCK_2_SP
393};
394
395enum htt_rx_legacy_rate_type {
396 HTT_RX_LEGACY_RATE_OFDM = 0,
397 HTT_RX_LEGACY_RATE_CCK
398};
399
400enum htt_rx_preamble_type {
401 HTT_RX_LEGACY = 0x4,
402 HTT_RX_HT = 0x8,
403 HTT_RX_HT_WITH_TXBF = 0x9,
404 HTT_RX_VHT = 0xC,
405 HTT_RX_VHT_WITH_TXBF = 0xD,
406};
407
408/*
409 * Fields: phy_err_valid, phy_err_code, tsf,
410 * usec_timestamp, sub_usec_timestamp
411 * ..are valid only if end_valid == 1.
412 *
413 * Fields: rssi_chains, legacy_rate_type,
414 * legacy_rate_cck, preamble_type, service,
415 * vht_sig_*
416 * ..are valid only if start_valid == 1;
417 */
418struct htt_rx_indication_ppdu {
419 u8 combined_rssi;
420 u8 sub_usec_timestamp;
421 u8 phy_err_code;
422 u8 info0; /* HTT_RX_INDICATION_INFO0_ */
423 struct {
424 u8 pri20_db;
425 u8 ext20_db;
426 u8 ext40_db;
427 u8 ext80_db;
428 } __packed rssi_chains[4];
429 __le32 tsf;
430 __le32 usec_timestamp;
431 __le32 info1; /* HTT_RX_INDICATION_INFO1_ */
432 __le32 info2; /* HTT_RX_INDICATION_INFO2_ */
433} __packed;
434
435enum htt_rx_mpdu_status {
436 HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
437 HTT_RX_IND_MPDU_STATUS_OK,
438 HTT_RX_IND_MPDU_STATUS_ERR_FCS,
439 HTT_RX_IND_MPDU_STATUS_ERR_DUP,
440 HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
441 HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
442 /* only accept EAPOL frames */
443 HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
444 HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
 445 /* Non-data in promiscuous mode */
446 HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
447 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
448 HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
449 HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
450 HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
451 HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
452
453 /*
454 * MISC: discard for unspecified reasons.
455 * Leave this enum value last.
456 */
457 HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
458};
459
460struct htt_rx_indication_mpdu_range {
461 u8 mpdu_count;
462 u8 mpdu_range_status; /* %htt_rx_mpdu_status */
463 u8 pad0;
464 u8 pad1;
465} __packed;
466
467struct htt_rx_indication_prefix {
468 __le16 fw_rx_desc_bytes;
469 u8 pad0;
470 u8 pad1;
471};
472
473struct htt_rx_indication {
474 struct htt_rx_indication_hdr hdr;
475 struct htt_rx_indication_ppdu ppdu;
476 struct htt_rx_indication_prefix prefix;
477
478 /*
479 * the following fields are both dynamically sized, so
480 * take care addressing them
481 */
482
483 /* the size of this is %fw_rx_desc_bytes */
484 struct fw_rx_desc_base fw_desc;
485
486 /*
487 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
488 * and has %num_mpdu_ranges elements.
489 */
490 struct htt_rx_indication_mpdu_range mpdu_ranges[0];
491} __packed;
492
493static inline struct htt_rx_indication_mpdu_range *
494 htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
495{
496 void *ptr = rx_ind;
497
498 ptr += sizeof(rx_ind->hdr)
499 + sizeof(rx_ind->ppdu)
500 + sizeof(rx_ind->prefix)
501 + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
502 return ptr;
503}
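
/*
 * Usage sketch for the helper above: count the MPDUs covered by all ranges
 * in an rx indication. num_mpdu_ranges is carried in hdr.info1 (see
 * HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_*). The function name is an
 * illustrative example only.
 */
static inline int htt_rx_ind_count_mpdus(struct htt_rx_indication *rx_ind)
{
	struct htt_rx_indication_mpdu_range *ranges =
		htt_rx_ind_get_mpdu_ranges(rx_ind);
	int num_ranges = (__le32_to_cpu(rx_ind->hdr.info1) &
			  HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK) >>
			 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB;
	int i, num_mpdus = 0;

	for (i = 0; i < num_ranges; i++)
		num_mpdus += ranges[i].mpdu_count;

	return num_mpdus;
}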
504
505enum htt_rx_flush_mpdu_status {
506 HTT_RX_FLUSH_MPDU_DISCARD = 0,
507 HTT_RX_FLUSH_MPDU_REORDER = 1,
508};
509
510/*
511 * htt_rx_flush - discard or reorder given range of mpdus
512 *
513 * Note: host must check if all sequence numbers between
514 * [seq_num_start, seq_num_end-1] are valid.
515 */
516struct htt_rx_flush {
517 __le16 peer_id;
518 u8 tid;
519 u8 rsvd0;
520 u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
521 u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
522 u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
523};
524
525struct htt_rx_peer_map {
526 u8 vdev_id;
527 __le16 peer_id;
528 u8 addr[6];
529 u8 rsvd0;
530 u8 rsvd1;
531} __packed;
532
533struct htt_rx_peer_unmap {
534 u8 rsvd0;
535 __le16 peer_id;
536} __packed;
537
538enum htt_security_types {
539 HTT_SECURITY_NONE,
540 HTT_SECURITY_WEP128,
541 HTT_SECURITY_WEP104,
542 HTT_SECURITY_WEP40,
543 HTT_SECURITY_TKIP,
544 HTT_SECURITY_TKIP_NOMIC,
545 HTT_SECURITY_AES_CCMP,
546 HTT_SECURITY_WAPI,
547
548 HTT_NUM_SECURITY_TYPES /* keep this last! */
549};
550
551enum htt_security_flags {
552#define HTT_SECURITY_TYPE_MASK 0x7F
553#define HTT_SECURITY_TYPE_LSB 0
554 HTT_SECURITY_IS_UNICAST = 1 << 7
555};
556
557struct htt_security_indication {
558 union {
 559 /* don't use bitfields; undefined behaviour */
560 u8 flags; /* %htt_security_flags */
561 struct {
562 u8 security_type:7, /* %htt_security_types */
563 is_unicast:1;
564 } __packed;
565 } __packed;
566 __le16 peer_id;
567 u8 michael_key[8];
568 u8 wapi_rsc[16];
569} __packed;
570
571#define HTT_RX_BA_INFO0_TID_MASK 0x000F
572#define HTT_RX_BA_INFO0_TID_LSB 0
573#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
574#define HTT_RX_BA_INFO0_PEER_ID_LSB 4
575
576struct htt_rx_addba {
577 u8 window_size;
578 __le16 info0; /* %HTT_RX_BA_INFO0_ */
579} __packed;
580
581struct htt_rx_delba {
582 u8 rsvd0;
583 __le16 info0; /* %HTT_RX_BA_INFO0_ */
584} __packed;
585
586enum htt_data_tx_status {
587 HTT_DATA_TX_STATUS_OK = 0,
588 HTT_DATA_TX_STATUS_DISCARD = 1,
589 HTT_DATA_TX_STATUS_NO_ACK = 2,
590 HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */
591 HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
592};
593
594enum htt_data_tx_flags {
595#define HTT_DATA_TX_STATUS_MASK 0x07
596#define HTT_DATA_TX_STATUS_LSB 0
597#define HTT_DATA_TX_TID_MASK 0x78
598#define HTT_DATA_TX_TID_LSB 3
599 HTT_DATA_TX_TID_INVALID = 1 << 7
600};
601
602#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
603
604struct htt_data_tx_completion {
605 union {
606 u8 flags;
607 struct {
608 u8 status:3,
609 tid:4,
610 tid_invalid:1;
611 } __packed;
612 } __packed;
613 u8 num_msdus;
614 u8 rsvd0;
615 __le16 msdus[0]; /* variable length based on %num_msdus */
616} __packed;
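
/*
 * Sketch: walking the MSDU ids reported by a data tx completion. Invalid
 * ids are skipped as indicated by HTT_TX_COMPL_INV_MSDU_ID. The helper and
 * its callback are illustrative examples, not the driver's actual
 * completion path.
 */
static inline void
htt_data_tx_completion_for_each_msdu(struct htt_data_tx_completion *compl,
				     void (*cb)(void *ctx, u16 msdu_id),
				     void *ctx)
{
	int i;

	for (i = 0; i < compl->num_msdus; i++) {
		u16 msdu_id = __le16_to_cpu(compl->msdus[i]);

		if (msdu_id == HTT_TX_COMPL_INV_MSDU_ID)
			continue;

		cb(ctx, msdu_id);
	}
}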
617
618struct htt_tx_compl_ind_base {
619 u32 hdr;
620 u16 payload[1/*or more*/];
621} __packed;
622
623struct htt_rc_tx_done_params {
624 u32 rate_code;
625 u32 rate_code_flags;
626 u32 flags;
627 u32 num_enqued; /* 1 for non-AMPDU */
628 u32 num_retries;
629 u32 num_failed; /* for AMPDU */
630 u32 ack_rssi;
631 u32 time_stamp;
632 u32 is_probe;
633};
634
635struct htt_rc_update {
636 u8 vdev_id;
637 __le16 peer_id;
638 u8 addr[6];
639 u8 num_elems;
640 u8 rsvd0;
641 struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
642} __packed;
643
644/* see htt_rx_indication for similar fields and descriptions */
645struct htt_rx_fragment_indication {
646 union {
647 u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
648 struct {
649 u8 ext_tid:5,
650 flush_valid:1;
651 } __packed;
652 } __packed;
653 __le16 peer_id;
654 __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
655 __le16 fw_rx_desc_bytes;
656 __le16 rsvd0;
657
658 u8 fw_msdu_rx_desc[0];
659} __packed;
660
661#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
662#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
663#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
664#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5
665
666#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
667#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
668#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
669#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
670
Michal Kaziorc5450702015-01-24 12:14:48 +0200671struct htt_rx_pn_ind {
672 __le16 peer_id;
673 u8 tid;
674 u8 seqno_start;
675 u8 seqno_end;
676 u8 pn_ie_count;
677 u8 reserved;
678 u8 pn_ies[0];
679} __packed;
680
681struct htt_rx_offload_msdu {
682 __le16 msdu_len;
683 __le16 peer_id;
684 u8 vdev_id;
685 u8 tid;
686 u8 fw_desc;
687 u8 payload[0];
688} __packed;
689
690struct htt_rx_offload_ind {
691 u8 reserved;
692 __le16 msdu_count;
693} __packed;
694
695struct htt_rx_in_ord_msdu_desc {
696 __le32 msdu_paddr;
697 __le16 msdu_len;
698 u8 fw_desc;
699 u8 reserved;
700} __packed;
701
702struct htt_rx_in_ord_ind {
703 u8 info;
704 __le16 peer_id;
705 u8 vdev_id;
706 u8 reserved;
707 __le16 msdu_count;
708 struct htt_rx_in_ord_msdu_desc msdu_descs[0];
709} __packed;
710
711#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
712#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
713#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
714#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
715#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
716#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
717
Kalle Valo5e3dd152013-06-12 20:52:10 +0300718/*
719 * target -> host test message definition
720 *
721 * The following field definitions describe the format of the test
722 * message sent from the target to the host.
723 * The message consists of a 4-octet header, followed by a variable
724 * number of 32-bit integer values, followed by a variable number
725 * of 8-bit character values.
726 *
727 * |31 16|15 8|7 0|
728 * |-----------------------------------------------------------|
729 * | num chars | num ints | msg type |
730 * |-----------------------------------------------------------|
731 * | int 0 |
732 * |-----------------------------------------------------------|
733 * | int 1 |
734 * |-----------------------------------------------------------|
735 * | ... |
736 * |-----------------------------------------------------------|
737 * | char 3 | char 2 | char 1 | char 0 |
738 * |-----------------------------------------------------------|
739 * | | | ... | char 4 |
740 * |-----------------------------------------------------------|
741 * - MSG_TYPE
742 * Bits 7:0
743 * Purpose: identifies this as a test message
744 * Value: HTT_MSG_TYPE_TEST
745 * - NUM_INTS
746 * Bits 15:8
747 * Purpose: indicate how many 32-bit integers follow the message header
748 * - NUM_CHARS
749 * Bits 31:16
 750 * Purpose: indicate how many 8-bit characters follow the series of integers
751 */
752struct htt_rx_test {
753 u8 num_ints;
754 __le16 num_chars;
755
756 /* payload consists of 2 lists:
757 * a) num_ints * sizeof(__le32)
758 * b) num_chars * sizeof(u8) aligned to 4bytes */
759 u8 payload[0];
760} __packed;
761
762static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
763{
764 return (__le32 *)rx_test->payload;
765}
766
767static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
768{
769 return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
770}
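
/*
 * Usage sketch for the accessors above, following the test message layout
 * documented before struct htt_rx_test. pr_debug() and the function name
 * are used here purely for illustration.
 */
static inline void htt_rx_test_dump(struct htt_rx_test *rx_test)
{
	__le32 *ints = htt_rx_test_get_ints(rx_test);
	u8 *chars = htt_rx_test_get_chars(rx_test);
	int i;

	for (i = 0; i < rx_test->num_ints; i++)
		pr_debug("htt rx test int %d: %u\n", i, __le32_to_cpu(ints[i]));

	for (i = 0; i < __le16_to_cpu(rx_test->num_chars); i++)
		pr_debug("htt rx test char %d: %c\n", i, chars[i]);
}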
771
772/*
773 * target -> host packet log message
774 *
775 * The following field definitions describe the format of the packet log
776 * message sent from the target to the host.
 777 * The message consists of a 4-octet header, followed by a variable number
778 * of 32-bit character values.
779 *
780 * |31 24|23 16|15 8|7 0|
781 * |-----------------------------------------------------------|
782 * | | | | msg type |
783 * |-----------------------------------------------------------|
784 * | payload |
785 * |-----------------------------------------------------------|
786 * - MSG_TYPE
787 * Bits 7:0
 788 * Purpose: identifies this as a pktlog message
789 * Value: HTT_MSG_TYPE_PACKETLOG
790 */
791struct htt_pktlog_msg {
792 u8 pad[3];
Rajkumar Manoharanbfdd7932014-10-03 08:02:40 +0300793 u8 payload[0];
Kalle Valo5e3dd152013-06-12 20:52:10 +0300794} __packed;
795
796struct htt_dbg_stats_rx_reorder_stats {
797 /* Non QoS MPDUs received */
798 __le32 deliver_non_qos;
799
800 /* MPDUs received in-order */
801 __le32 deliver_in_order;
802
803 /* Flush due to reorder timer expired */
804 __le32 deliver_flush_timeout;
805
806 /* Flush due to move out of window */
807 __le32 deliver_flush_oow;
808
809 /* Flush due to DELBA */
810 __le32 deliver_flush_delba;
811
812 /* MPDUs dropped due to FCS error */
813 __le32 fcs_error;
814
815 /* MPDUs dropped due to monitor mode non-data packet */
816 __le32 mgmt_ctrl;
817
818 /* MPDUs dropped due to invalid peer */
819 __le32 invalid_peer;
820
821 /* MPDUs dropped due to duplication (non aggregation) */
822 __le32 dup_non_aggr;
823
824 /* MPDUs dropped due to processed before */
825 __le32 dup_past;
826
827 /* MPDUs dropped due to duplicate in reorder queue */
828 __le32 dup_in_reorder;
829
830 /* Reorder timeout happened */
831 __le32 reorder_timeout;
832
833 /* invalid bar ssn */
834 __le32 invalid_bar_ssn;
835
836 /* reorder reset due to bar ssn */
837 __le32 ssn_reset;
838};
839
840struct htt_dbg_stats_wal_tx_stats {
841 /* Num HTT cookies queued to dispatch list */
842 __le32 comp_queued;
843
844 /* Num HTT cookies dispatched */
845 __le32 comp_delivered;
846
847 /* Num MSDU queued to WAL */
848 __le32 msdu_enqued;
849
850 /* Num MPDU queue to WAL */
851 __le32 mpdu_enqued;
852
853 /* Num MSDUs dropped by WMM limit */
854 __le32 wmm_drop;
855
856 /* Num Local frames queued */
857 __le32 local_enqued;
858
859 /* Num Local frames done */
860 __le32 local_freed;
861
862 /* Num queued to HW */
863 __le32 hw_queued;
864
865 /* Num PPDU reaped from HW */
866 __le32 hw_reaped;
867
868 /* Num underruns */
869 __le32 underrun;
870
871 /* Num PPDUs cleaned up in TX abort */
872 __le32 tx_abort;
873
874 /* Num MPDUs requed by SW */
875 __le32 mpdus_requed;
876
877 /* excessive retries */
878 __le32 tx_ko;
879
880 /* data hw rate code */
881 __le32 data_rc;
882
883 /* Scheduler self triggers */
884 __le32 self_triggers;
885
886 /* frames dropped due to excessive sw retries */
887 __le32 sw_retry_failure;
888
889 /* illegal rate phy errors */
890 __le32 illgl_rate_phy_err;
891
 892 /* wal pdev continuous xretry */
893 __le32 pdev_cont_xretry;
894
 895 /* wal pdev tx timeouts */
896 __le32 pdev_tx_timeout;
897
898 /* wal pdev resets */
899 __le32 pdev_resets;
900
901 __le32 phy_underrun;
902
903 /* MPDU is more than txop limit */
904 __le32 txop_ovf;
905} __packed;
906
907struct htt_dbg_stats_wal_rx_stats {
 908 /* Counts any change in ring routing mid-ppdu */
909 __le32 mid_ppdu_route_change;
910
911 /* Total number of statuses processed */
912 __le32 status_rcvd;
913
914 /* Extra frags on rings 0-3 */
915 __le32 r0_frags;
916 __le32 r1_frags;
917 __le32 r2_frags;
918 __le32 r3_frags;
919
920 /* MSDUs / MPDUs delivered to HTT */
921 __le32 htt_msdus;
922 __le32 htt_mpdus;
923
924 /* MSDUs / MPDUs delivered to local stack */
925 __le32 loc_msdus;
926 __le32 loc_mpdus;
927
928 /* AMSDUs that have more MSDUs than the status ring size */
929 __le32 oversize_amsdu;
930
931 /* Number of PHY errors */
932 __le32 phy_errs;
933
934 /* Number of PHY errors drops */
935 __le32 phy_err_drop;
936
937 /* Number of mpdu errors - FCS, MIC, ENC etc. */
938 __le32 mpdu_errs;
939} __packed;
940
941struct htt_dbg_stats_wal_peer_stats {
942 __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
943} __packed;
944
945struct htt_dbg_stats_wal_pdev_txrx {
946 struct htt_dbg_stats_wal_tx_stats tx_stats;
947 struct htt_dbg_stats_wal_rx_stats rx_stats;
948 struct htt_dbg_stats_wal_peer_stats peer_stats;
949} __packed;
950
951struct htt_dbg_stats_rx_rate_info {
952 __le32 mcs[10];
953 __le32 sgi[10];
954 __le32 nss[4];
955 __le32 stbc[10];
956 __le32 bw[3];
957 __le32 pream[6];
958 __le32 ldpc;
959 __le32 txbf;
960};
961
962/*
963 * htt_dbg_stats_status -
964 * present - The requested stats have been delivered in full.
965 * This indicates that either the stats information was contained
966 * in its entirety within this message, or else this message
967 * completes the delivery of the requested stats info that was
968 * partially delivered through earlier STATS_CONF messages.
969 * partial - The requested stats have been delivered in part.
970 * One or more subsequent STATS_CONF messages with the same
971 * cookie value will be sent to deliver the remainder of the
972 * information.
973 * error - The requested stats could not be delivered, for example due
974 * to a shortage of memory to construct a message holding the
975 * requested stats.
976 * invalid - The requested stat type is either not recognized, or the
977 * target is configured to not gather the stats type in question.
978 * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
979 * series_done - This special value indicates that no further stats info
980 * elements are present within a series of stats info elems
981 * (within a stats upload confirmation message).
982 */
983enum htt_dbg_stats_status {
984 HTT_DBG_STATS_STATUS_PRESENT = 0,
985 HTT_DBG_STATS_STATUS_PARTIAL = 1,
986 HTT_DBG_STATS_STATUS_ERROR = 2,
987 HTT_DBG_STATS_STATUS_INVALID = 3,
988 HTT_DBG_STATS_STATUS_SERIES_DONE = 7
989};
990
991/*
992 * target -> host statistics upload
993 *
994 * The following field definitions describe the format of the HTT target
995 * to host stats upload confirmation message.
996 * The message contains a cookie echoed from the HTT host->target stats
997 * upload request, which identifies which request the confirmation is
998 * for, and a series of tag-length-value stats information elements.
999 * The tag-length header for each stats info element also includes a
1000 * status field, to indicate whether the request for the stat type in
1001 * question was fully met, partially met, unable to be met, or invalid
1002 * (if the stat type in question is disabled in the target).
1003 * A special value of all 1's in this status field is used to indicate
1004 * the end of the series of stats info elements.
1005 *
1006 *
1007 * |31 16|15 8|7 5|4 0|
1008 * |------------------------------------------------------------|
1009 * | reserved | msg type |
1010 * |------------------------------------------------------------|
1011 * | cookie LSBs |
1012 * |------------------------------------------------------------|
1013 * | cookie MSBs |
1014 * |------------------------------------------------------------|
1015 * | stats entry length | reserved | S |stat type|
1016 * |------------------------------------------------------------|
1017 * | |
1018 * | type-specific stats info |
1019 * | |
1020 * |------------------------------------------------------------|
1021 * | stats entry length | reserved | S |stat type|
1022 * |------------------------------------------------------------|
1023 * | |
1024 * | type-specific stats info |
1025 * | |
1026 * |------------------------------------------------------------|
1027 * | n/a | reserved | 111 | n/a |
1028 * |------------------------------------------------------------|
1029 * Header fields:
1030 * - MSG_TYPE
1031 * Bits 7:0
1032 * Purpose: identifies this is a statistics upload confirmation message
1033 * Value: 0x9
1034 * - COOKIE_LSBS
1035 * Bits 31:0
1036 * Purpose: Provide a mechanism to match a target->host stats confirmation
1037 * message with its preceding host->target stats request message.
1038 * Value: LSBs of the opaque cookie specified by the host-side requestor
1039 * - COOKIE_MSBS
1040 * Bits 31:0
1041 * Purpose: Provide a mechanism to match a target->host stats confirmation
1042 * message with its preceding host->target stats request message.
1043 * Value: MSBs of the opaque cookie specified by the host-side requestor
1044 *
1045 * Stats Information Element tag-length header fields:
1046 * - STAT_TYPE
1047 * Bits 4:0
1048 * Purpose: identifies the type of statistics info held in the
1049 * following information element
1050 * Value: htt_dbg_stats_type
1051 * - STATUS
1052 * Bits 7:5
1053 * Purpose: indicate whether the requested stats are present
1054 * Value: htt_dbg_stats_status, including a special value (0x7) to mark
1055 * the completion of the stats entry series
1056 * - LENGTH
1057 * Bits 31:16
1058 * Purpose: indicate the stats information size
1059 * Value: This field specifies the number of bytes of stats information
1060 * that follows the element tag-length header.
1061 * It is expected but not required that this length is a multiple of
1062 * 4 bytes. Even if the length is not an integer multiple of 4, the
1063 * subsequent stats entry header will begin on a 4-byte aligned
1064 * boundary.
1065 */
1066
1067#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
1068#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0
1069#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0
1070#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5
1071
1072struct htt_stats_conf_item {
1073 union {
1074 u8 info;
1075 struct {
1076 u8 stat_type:5; /* %HTT_DBG_STATS_ */
1077 u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
1078 } __packed;
1079 } __packed;
1080 u8 pad;
1081 __le16 length;
1082 u8 payload[0]; /* roundup(length, 4) long */
1083} __packed;
1084
1085struct htt_stats_conf {
1086 u8 pad[3];
1087 __le32 cookie_lsb;
1088 __le32 cookie_msb;
1089
1090 /* each item has variable length! */
1091 struct htt_stats_conf_item items[0];
1092} __packed;
1093
1094static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
1095 const struct htt_stats_conf_item *item)
1096{
1097 return (void *)item + sizeof(*item) + roundup(item->length, 4);
1098}
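
/*
 * Sketch: iterating the stats info elements of a stats upload confirmation
 * until the series-done marker, using htt_stats_conf_next_item() above.
 * msg_len is assumed to be the length of the HTT payload so the walk stays
 * bounded; the function name is illustrative only.
 */
static inline int htt_stats_conf_count_items(const struct htt_stats_conf *conf,
					     int msg_len)
{
	const struct htt_stats_conf_item *item = conf->items;
	const void *end = (const void *)conf + msg_len;
	int n = 0;

	while ((const void *)item + sizeof(*item) <= end) {
		u8 status = (item->info & HTT_STATS_CONF_ITEM_INFO_STATUS_MASK) >>
			    HTT_STATS_CONF_ITEM_INFO_STATUS_LSB;

		if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
			break;

		n++;
		item = htt_stats_conf_next_item(item);
	}

	return n;
}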
Kalle Valo8cc7f262014-09-14 12:50:39 +03001099
Kalle Valo5e3dd152013-06-12 20:52:10 +03001100/*
1101 * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
1102 *
1103 * The following field definitions describe the format of the HTT host
1104 * to target frag_desc/msdu_ext bank configuration message.
 1105 * The message contains the base address and the min and max id of the
 1106 * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
 1107 * MSDU_EXT/FRAG_DESC.
 1108 * HTT will use the id in the HTT descriptor instead of sending the frag_desc_ptr.
 1109 * For QCA988X HW the firmware will use the fragment_desc_ptr, but in WIFI2.0
 1110 * the hardware does the mapping/translation.
 1111 *
 1112 * The total number of banks that can be configured is 16.
 1113 *
 1114 * This should be called before any TX has been initiated by the HTT
1115 *
1116 * |31 16|15 8|7 5|4 0|
1117 * |------------------------------------------------------------|
1118 * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type |
1119 * |------------------------------------------------------------|
1120 * | BANK0_BASE_ADDRESS |
1121 * |------------------------------------------------------------|
1122 * | ... |
1123 * |------------------------------------------------------------|
1124 * | BANK15_BASE_ADDRESS |
1125 * |------------------------------------------------------------|
1126 * | BANK0_MAX_ID | BANK0_MIN_ID |
1127 * |------------------------------------------------------------|
1128 * | ... |
1129 * |------------------------------------------------------------|
1130 * | BANK15_MAX_ID | BANK15_MIN_ID |
1131 * |------------------------------------------------------------|
1132 * Header fields:
1133 * - MSG_TYPE
1134 * Bits 7:0
1135 * Value: 0x6
1136 * - BANKx_BASE_ADDRESS
1137 * Bits 31:0
1138 * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
1139 * bank physical/bus address.
1140 * - BANKx_MIN_ID
1141 * Bits 15:0
 1142 * Purpose: Provide a mechanism to specify the min index that needs to
 1143 * be mapped.
 1144 * - BANKx_MAX_ID
 1145 * Bits 31:16
 1146 * Purpose: Provide a mechanism to specify the max index that needs to
 1147 * be mapped.
1148 */
1149struct htt_frag_desc_bank_id {
1150 __le16 bank_min_id;
1151 __le16 bank_max_id;
1152} __packed;
1153
 1154/* the real maximum is 16, but it wouldn't fit in the max htt message size
 1155 * so we use a conservatively safe value for now */
1156#define HTT_FRAG_DESC_BANK_MAX 4
1157
1158#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
1159#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
1160#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2)
1161
1162struct htt_frag_desc_bank_cfg {
1163 u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
1164 u8 num_banks;
1165 u8 desc_size;
1166 __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
1167 struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
1168} __packed;
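
/*
 * Sketch: filling a single-bank configuration following the message layout
 * described above. The pdev id, swap flag and id range below are
 * illustrative placeholder values; the real values come from the driver's
 * fragment descriptor allocation.
 */
static inline void
htt_frag_desc_bank_cfg_fill_one(struct htt_frag_desc_bank_cfg *cfg,
				u32 bank_paddr, u16 num_descs, u8 desc_size)
{
	cfg->info = 0;		/* pdev id 0, no byte swap */
	cfg->num_banks = 1;
	cfg->desc_size = desc_size;
	cfg->bank_base_addrs[0] = __cpu_to_le32(bank_paddr);
	cfg->bank_id[0].bank_min_id = __cpu_to_le16(0);
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(num_descs - 1);
}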
1169
1170union htt_rx_pn_t {
1171 /* WEP: 24-bit PN */
1172 u32 pn24;
1173
1174 /* TKIP or CCMP: 48-bit PN */
1175 u_int64_t pn48;
1176
1177 /* WAPI: 128-bit PN */
1178 u_int64_t pn128[2];
1179};
1180
1181struct htt_cmd {
1182 struct htt_cmd_hdr hdr;
1183 union {
1184 struct htt_ver_req ver_req;
1185 struct htt_mgmt_tx_desc mgmt_tx;
1186 struct htt_data_tx_desc data_tx;
1187 struct htt_rx_ring_setup rx_setup;
1188 struct htt_stats_req stats_req;
1189 struct htt_oob_sync_req oob_sync_req;
1190 struct htt_aggr_conf aggr_conf;
1191 struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
1192 };
1193} __packed;
1194
1195struct htt_resp {
1196 struct htt_resp_hdr hdr;
1197 union {
1198 struct htt_ver_resp ver_resp;
1199 struct htt_mgmt_tx_completion mgmt_tx_completion;
1200 struct htt_data_tx_completion data_tx_completion;
1201 struct htt_rx_indication rx_ind;
1202 struct htt_rx_fragment_indication rx_frag_ind;
1203 struct htt_rx_peer_map peer_map;
1204 struct htt_rx_peer_unmap peer_unmap;
1205 struct htt_rx_flush rx_flush;
1206 struct htt_rx_addba rx_addba;
1207 struct htt_rx_delba rx_delba;
1208 struct htt_security_indication security_indication;
1209 struct htt_rc_update rc_update;
1210 struct htt_rx_test rx_test;
1211 struct htt_pktlog_msg pktlog_msg;
1212 struct htt_stats_conf stats_conf;
Michal Kaziorc5450702015-01-24 12:14:48 +02001213 struct htt_rx_pn_ind rx_pn_ind;
1214 struct htt_rx_offload_ind rx_offload_ind;
1215 struct htt_rx_in_ord_ind rx_in_ord_ind;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001216 };
1217} __packed;
1218
Kalle Valo5e3dd152013-06-12 20:52:10 +03001219/*** host side structures follow ***/
1220
1221struct htt_tx_done {
1222 u32 msdu_id;
1223 bool discard;
1224 bool no_ack;
1225};
1226
1227struct htt_peer_map_event {
1228 u8 vdev_id;
1229 u16 peer_id;
1230 u8 addr[ETH_ALEN];
1231};
1232
1233struct htt_peer_unmap_event {
1234 u16 peer_id;
1235};
1236
Michal Kaziora16942e2014-02-27 18:50:04 +02001237struct ath10k_htt_txbuf {
1238 struct htt_data_tx_desc_frag frags[2];
1239 struct ath10k_htc_hdr htc_hdr;
1240 struct htt_cmd_hdr cmd_hdr;
1241 struct htt_data_tx_desc cmd_tx;
1242} __packed;
1243
Kalle Valo5e3dd152013-06-12 20:52:10 +03001244struct ath10k_htt {
1245 struct ath10k *ar;
1246 enum ath10k_htc_ep_id eid;
1247
Kalle Valo5e3dd152013-06-12 20:52:10 +03001248 u8 target_version_major;
1249 u8 target_version_minor;
1250 struct completion target_version_received;
1251
1252 struct {
1253 /*
1254 * Ring of network buffer objects - This ring is
1255 * used exclusively by the host SW. This ring
1256 * mirrors the dev_addrs_ring that is shared
1257 * between the host SW and the MAC HW. The host SW
1258 * uses this netbufs ring to locate the network
1259 * buffer objects whose data buffers the HW has
1260 * filled.
1261 */
1262 struct sk_buff **netbufs_ring;
Michal Kaziorc5450702015-01-24 12:14:48 +02001263
1264 /* This is used only with firmware supporting IN_ORD_IND.
1265 *
1266 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
1267 * buffer ring from which buffer addresses are copied by the
1268 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
1269 * pointing to specific (re-ordered) buffers.
1270 *
1271 * FIXME: With kernel generic hashing functions there's a lot
1272 * of hash collisions for sk_buffs.
1273 */
1274 bool in_ord_rx;
1275 DECLARE_HASHTABLE(skb_table, 4);
1276
Kalle Valo5e3dd152013-06-12 20:52:10 +03001277 /*
1278 * Ring of buffer addresses -
1279 * This ring holds the "physical" device address of the
1280 * rx buffers the host SW provides for the MAC HW to
1281 * fill.
1282 */
1283 __le32 *paddrs_ring;
1284
1285 /*
1286 * Base address of ring, as a "physical" device address
1287 * rather than a CPU address.
1288 */
1289 dma_addr_t base_paddr;
1290
1291 /* how many elems in the ring (power of 2) */
1292 int size;
1293
1294 /* size - 1 */
1295 unsigned size_mask;
1296
1297 /* how many rx buffers to keep in the ring */
1298 int fill_level;
1299
1300 /* how many rx buffers (full+empty) are in the ring */
1301 int fill_cnt;
1302
1303 /*
1304 * alloc_idx - where HTT SW has deposited empty buffers
1305 * This is allocated in consistent mem, so that the FW can
1306 * read this variable, and program the HW's FW_IDX reg with
1307 * the value of this shadow register.
1308 */
1309 struct {
1310 __le32 *vaddr;
1311 dma_addr_t paddr;
1312 } alloc_idx;
1313
1314 /* where HTT SW has processed bufs filled by rx MAC DMA */
1315 struct {
1316 unsigned msdu_payld;
1317 } sw_rd_idx;
1318
1319 /*
1320 * refill_retry_timer - timer triggered when the ring is
1321 * not refilled to the level expected
1322 */
1323 struct timer_list refill_retry_timer;
1324
1325 /* Protects access to all rx ring buffer state variables */
1326 spinlock_t lock;
1327 } rx_ring;
1328
1329 unsigned int prefetch_len;
1330
Michal Kazior89d6d832015-01-24 12:14:51 +02001331 /* Protects access to pending_tx, num_pending_tx */
Kalle Valo5e3dd152013-06-12 20:52:10 +03001332 spinlock_t tx_lock;
1333 int max_num_pending_tx;
1334 int num_pending_tx;
Michal Kazior89d6d832015-01-24 12:14:51 +02001335 struct idr pending_tx;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001336 wait_queue_head_t empty_tx_wq;
Michal Kaziora16942e2014-02-27 18:50:04 +02001337 struct dma_pool *tx_pool;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001338
1339 /* set if host-fw communication goes haywire
1340 * used to avoid further failures */
1341 bool rx_confused;
Michal Kazior6e712d42013-09-24 10:18:36 +02001342 struct tasklet_struct rx_replenish_task;
Michal Kazior6c5151a2014-02-27 18:50:04 +02001343
1344 /* This is used to group tx/rx completions separately and process them
1345 * in batches to reduce cache stalls */
1346 struct tasklet_struct txrx_compl_task;
1347 struct sk_buff_head tx_compl_q;
1348 struct sk_buff_head rx_compl_q;
Michal Kaziorc5450702015-01-24 12:14:48 +02001349 struct sk_buff_head rx_in_ord_compl_q;
Janusz Dziedzic6df92a32014-03-24 21:24:57 +01001350
1351 /* rx_status template */
1352 struct ieee80211_rx_status rx_status;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001353};
1354
1355#define RX_HTT_HDR_STATUS_LEN 64
1356
1357/* This structure layout is programmed via rx ring setup
1358 * so that FW knows how to transfer the rx descriptor to the host.
1359 * Buffers like this are placed on the rx ring. */
1360struct htt_rx_desc {
1361 union {
1362 /* This field is filled on the host using the msdu buffer
1363 * from htt_rx_indication */
1364 struct fw_rx_desc_base fw_desc;
1365 u32 pad;
1366 } __packed;
1367 struct {
1368 struct rx_attention attention;
1369 struct rx_frag_info frag_info;
1370 struct rx_mpdu_start mpdu_start;
1371 struct rx_msdu_start msdu_start;
1372 struct rx_msdu_end msdu_end;
1373 struct rx_mpdu_end mpdu_end;
1374 struct rx_ppdu_start ppdu_start;
1375 struct rx_ppdu_end ppdu_end;
1376 } __packed;
1377 u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
1378 u8 msdu_payload[0];
1379};
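
/*
 * Sketch of the relationship between this layout and the rx ring setup
 * message: the host advertises where each firmware-written portion lives
 * within struct htt_rx_desc, in 4-byte words (see struct
 * htt_rx_ring_setup_ring). The field subset and helper name here are
 * illustrative only.
 */
static inline void
htt_rx_desc_fill_ring_offsets(struct htt_rx_ring_setup_ring *ring)
{
	ring->mac80211_hdr_offset =
		__cpu_to_le16(offsetof(struct htt_rx_desc, rx_hdr_status) / 4);
	ring->msdu_payload_offset =
		__cpu_to_le16(offsetof(struct htt_rx_desc, msdu_payload) / 4);
	ring->ppdu_start_offset =
		__cpu_to_le16(offsetof(struct htt_rx_desc, ppdu_start) / 4);
	ring->ppdu_end_offset =
		__cpu_to_le16(offsetof(struct htt_rx_desc, ppdu_end) / 4);
	ring->mpdu_start_offset =
		__cpu_to_le16(offsetof(struct htt_rx_desc, mpdu_start) / 4);
	ring->msdu_start_offset =
		__cpu_to_le16(offsetof(struct htt_rx_desc, msdu_start) / 4);
	ring->rx_attention_offset =
		__cpu_to_le16(offsetof(struct htt_rx_desc, attention) / 4);
	ring->frag_info_offset =
		__cpu_to_le16(offsetof(struct htt_rx_desc, frag_info) / 4);
}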
1380
1381#define HTT_RX_DESC_ALIGN 8
1382
1383#define HTT_MAC_ADDR_LEN 6
1384
1385/*
1386 * FIX THIS
1387 * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
1388 * rounded up to a cache line size.
1389 */
1390#define HTT_RX_BUF_SIZE 1920
1391#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
1392
Michal Kazior6e712d42013-09-24 10:18:36 +02001393/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
1394 * aggregated traffic more nicely. */
1395#define ATH10K_HTT_MAX_NUM_REFILL 16
1396
Kalle Valo5e3dd152013-06-12 20:52:10 +03001397/*
1398 * DMA_MAP expects the buffer to be an integral number of cache lines.
1399 * Rather than checking the actual cache line size, this code makes a
1400 * conservative estimate of what the cache line size could be.
1401 */
1402#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
1403#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
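
/*
 * Sketch: rounding a buffer length up to the conservative cache line size
 * estimated above, as a DMA mapping of an rx buffer would require. The
 * helper name is illustrative only.
 */
static inline int htt_cache_line_align(int len)
{
	return (len + HTT_MAX_CACHE_LINE_SIZE_MASK) &
	       ~HTT_MAX_CACHE_LINE_SIZE_MASK;
}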
1404
Michal Kazior95bf21f2014-05-16 17:15:39 +03001405int ath10k_htt_connect(struct ath10k_htt *htt);
1406int ath10k_htt_init(struct ath10k *ar);
1407int ath10k_htt_setup(struct ath10k_htt *htt);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001408
Michal Kazior95bf21f2014-05-16 17:15:39 +03001409int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
1410void ath10k_htt_tx_free(struct ath10k_htt *htt);
1411
1412int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
Michal Kaziorc5450702015-01-24 12:14:48 +02001413int ath10k_htt_rx_ring_refill(struct ath10k *ar);
Michal Kazior95bf21f2014-05-16 17:15:39 +03001414void ath10k_htt_rx_free(struct ath10k_htt *htt);
1415
Kalle Valo5e3dd152013-06-12 20:52:10 +03001416void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
1417void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
1418int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
Kalle Valoa3d135e2013-09-03 11:44:10 +03001419int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001420int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
Janusz Dziedzicd3856232014-06-02 21:19:46 +03001421int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
1422 u8 max_subfrms_ampdu,
1423 u8 max_subfrms_amsdu);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001424
1425void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
Michal Kazior89d6d832015-01-24 12:14:51 +02001426int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001427void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
1428int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
1429int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
Michal Kazior6c5151a2014-02-27 18:50:04 +02001430
Kalle Valo5e3dd152013-06-12 20:52:10 +03001431#endif