/*
 * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef _HTT_TYPES__H_
#define _HTT_TYPES__H_

#include <osdep.h>              /* uint16_t, dma_addr_t */
#include <cdf_types.h>          /* cdf_device_t */
#include <cdf_lock.h>           /* cdf_spinlock_t */
#include <cdf_softirq_timer.h>  /* cdf_softirq_timer_t */
#include <cdf_atomic.h>         /* cdf_atomic_inc */
#include <cdf_nbuf.h>           /* cdf_nbuf_t */
#include <htc_api.h>            /* HTC_PACKET */

#include <ol_ctrl_api.h>        /* ol_pdev_handle */
#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle */

#define DEBUG_DMA_DONE

#define HTT_TX_MUTEX_TYPE cdf_spinlock_t

#ifdef QCA_TX_HTT2_SUPPORT
#ifndef HTC_TX_HTT2_MAX_SIZE
/* Should be kept in sync with the target's implementation. */
#define HTC_TX_HTT2_MAX_SIZE (120)
#endif
#endif /* QCA_TX_HTT2_SUPPORT */

struct htt_htc_pkt {
        void *pdev_ctxt;
        dma_addr_t nbuf_paddr;
        HTC_PACKET htc_pkt;
        uint16_t msdu_id;
};

struct htt_htc_pkt_union {
        union {
                struct htt_htc_pkt pkt;
                struct htt_htc_pkt_union *next;
        } u;
};
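
/*
 * Illustrative sketch (not part of the original header): the union above
 * lets a free htt_htc_pkt's storage double as a single-linked freelist
 * link.  The hypothetical helper below shows the push operation; the real
 * alloc/free logic lives in the HTT .c files.
 */
static inline struct htt_htc_pkt_union *
htt_htc_pkt_freelist_push(struct htt_htc_pkt_union *freelist,
                          struct htt_htc_pkt_union *pkt)
{
        pkt->u.next = freelist; /* reuse the packet storage as a link */
        return pkt;             /* the pushed entry becomes the new head */
}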

/*
 * HTT host descriptor:
 * Include the htt_tx_msdu_desc that gets downloaded to the target,
 * but also include the HTC_FRAME_HDR and alignment padding that
 * precede the htt_tx_msdu_desc.
 * htc_send_data_pkt expects this header space at the front of the
 * initial fragment (i.e. tx descriptor) that is downloaded.
 */
struct htt_host_tx_desc_t {
        uint8_t htc_header[HTC_HEADER_LEN];
        /* force the tx_desc field to begin on a 4-byte boundary */
        union {
                uint32_t dummy_force_align;
                struct htt_tx_msdu_desc_t tx_desc;
        } align32;
};
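
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * the union above forces tx_desc onto a 4-byte boundary right after the
 * HTC header space, so the target-visible descriptor is simply the
 * align32.tx_desc member.
 */
static inline struct htt_tx_msdu_desc_t *
htt_host_tx_desc_target_desc(struct htt_host_tx_desc_t *host_desc)
{
        return &host_desc->align32.tx_desc;
}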

struct htt_tx_mgmt_desc_buf {
        cdf_nbuf_t msg_buf;
        A_BOOL is_inuse;
        cdf_nbuf_t mgmt_frm;
};

struct htt_tx_mgmt_desc_ctxt {
        struct htt_tx_mgmt_desc_buf *pool;
        A_UINT32 pending_cnt;
};

struct htt_list_node {
        struct htt_list_node *prev;
        struct htt_list_node *next;
};

struct htt_rx_hash_entry {
        A_UINT32 paddr;
        cdf_nbuf_t netbuf;
        A_UINT8 fromlist;
        struct htt_list_node listnode;
#ifdef RX_HASH_DEBUG
        A_UINT32 cookie;
#endif
};

struct htt_rx_hash_bucket {
        struct htt_list_node listhead;
        struct htt_rx_hash_entry *entries;
        struct htt_list_node freepool;
#ifdef RX_HASH_DEBUG
        A_UINT32 count;
#endif
};
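
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * hash-bucket lists link htt_rx_hash_entry objects through their embedded
 * listnode member, so the owning entry is recovered by subtracting the
 * member's offset (the driver caches that offset; see
 * rx_ring.listnode_offset further below).
 */
static inline struct htt_rx_hash_entry *
htt_rx_hash_entry_of(struct htt_list_node *node, uint32_t listnode_offset)
{
        return (struct htt_rx_hash_entry *)((char *)node - listnode_offset);
}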

/*
 * IPA micro controller
 * WLAN host driver
 * firmware shared memory structure
 */
struct uc_shared_mem_t {
        uint32_t *vaddr;
        cdf_dma_addr_t paddr;
        cdf_dma_mem_context(memctx);
};

/* Micro controller datapath offload
 * WLAN TX resources */
struct htt_ipa_uc_tx_resource_t {
        struct uc_shared_mem_t tx_ce_idx;
        struct uc_shared_mem_t tx_comp_base;

        uint32_t tx_comp_idx_paddr;
        cdf_nbuf_t *tx_buf_pool_vaddr_strg;
        uint32_t alloc_tx_buf_cnt;
};

/* Micro controller datapath offload
 * WLAN RX resources */
struct htt_ipa_uc_rx_resource_t {
        cdf_dma_addr_t rx_rdy_idx_paddr;
        struct uc_shared_mem_t rx_ind_ring_base;
        struct uc_shared_mem_t rx_ipa_prc_done_idx;
        uint32_t rx_ind_ring_size;

        /* 2nd RX ring */
        cdf_dma_addr_t rx2_rdy_idx_paddr;
        struct uc_shared_mem_t rx2_ind_ring_base;
        struct uc_shared_mem_t rx2_ipa_prc_done_idx;
        uint32_t rx2_ind_ring_size;
};

struct ipa_uc_rx_ring_elem_t {
        uint32_t rx_packet_paddr;
        uint16_t vdev_id;
        uint16_t rx_packet_leng;
};

#if defined(HELIUMPLUS_PADDR64)
struct msdu_ext_desc_t {
#if defined(FEATURE_TSO)
        struct cdf_tso_flags_t tso_flags;
#else
        u_int32_t tso_flag0;
        u_int32_t tso_flag1;
        u_int32_t tso_flag2;
        u_int32_t tso_flag3;
        u_int32_t tso_flag4;
        u_int32_t tso_flag5;
#endif
        u_int32_t frag_ptr0;
        u_int32_t frag_len0;
        u_int32_t frag_ptr1;
        u_int32_t frag_len1;
        u_int32_t frag_ptr2;
        u_int32_t frag_len2;
        u_int32_t frag_ptr3;
        u_int32_t frag_len3;
        u_int32_t frag_ptr4;
        u_int32_t frag_len4;
        u_int32_t frag_ptr5;
        u_int32_t frag_len5;
};
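
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * each fragment slot of the extension descriptor pairs a "physical"
 * device address with a length in bytes; slot 0 is shown here.
 */
static inline void
msdu_ext_desc_set_frag0(struct msdu_ext_desc_t *ext_desc,
                        u_int32_t frag_paddr, u_int32_t frag_len)
{
        ext_desc->frag_ptr0 = frag_paddr;
        ext_desc->frag_len0 = frag_len;
}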
#endif /* defined(HELIUMPLUS_PADDR64) */

struct htt_pdev_t {
        ol_pdev_handle ctrl_pdev;
        ol_txrx_pdev_handle txrx_pdev;
        HTC_HANDLE htc_pdev;
        cdf_device_t osdev;

        HTC_ENDPOINT_ID htc_endpoint;

#ifdef QCA_TX_HTT2_SUPPORT
        HTC_ENDPOINT_ID htc_tx_htt2_endpoint;
        uint16_t htc_tx_htt2_max_size;
#endif /* QCA_TX_HTT2_SUPPORT */

#ifdef ATH_11AC_TXCOMPACT
        HTT_TX_MUTEX_TYPE txnbufq_mutex;
        cdf_nbuf_queue_t txnbufq;
        struct htt_htc_pkt_union *htt_htc_pkt_misclist;
#endif

        struct htt_htc_pkt_union *htt_htc_pkt_freelist;
        struct {
                int is_full_reorder_offload;
                int default_tx_comp_req;
                int ce_classify_enabled;
        } cfg;
        struct {
                uint8_t major;
                uint8_t minor;
        } tgt_ver;
#if defined(HELIUMPLUS_PADDR64)
        struct {
                u_int8_t major;
                u_int8_t minor;
        } wifi_ip_ver;
#endif /* defined(HELIUMPLUS_PADDR64) */
        struct {
                struct {
                        /*
                         * Ring of network buffer objects -
                         * This ring is used exclusively by the host SW.
                         * This ring mirrors the dev_addrs_ring that is shared
                         * between the host SW and the MAC HW.
                         * The host SW uses this netbufs ring to locate the nw
                         * buffer objects whose data buffers the HW has filled.
                         */
                        cdf_nbuf_t *netbufs_ring;
                        /*
                         * Ring of buffer addresses -
                         * This ring holds the "physical" device address of the
                         * rx buffers the host SW provides for MAC HW to fill.
                         */
#if HTT_PADDR64
                        uint64_t *paddrs_ring;
#else /* ! HTT_PADDR64 */
                        uint32_t *paddrs_ring;
#endif
                        cdf_dma_mem_context(memctx);
                } buf;
                /*
                 * Base address of ring, as a "physical" device address rather
                 * than a CPU address.
                 */
                uint32_t base_paddr;
                int size;           /* how many elems in the ring (power of 2) */
                unsigned size_mask; /* size - 1 */

                int fill_level;     /* how many rx buffers to keep in the ring */
                int fill_cnt;       /* # of rx buffers (full+empty) in the ring */

                /*
                 * target_idx -
                 * Without reorder offload:
                 *  not used
                 * With reorder offload:
                 *  points to the location in the rx ring from which rx buffers
                 *  are available to copy into the MAC DMA ring
                 */
                struct {
                        uint32_t *vaddr;
                        uint32_t paddr;
                        cdf_dma_mem_context(memctx);
                } target_idx;

                /*
                 * alloc_idx/host_idx -
                 * Without reorder offload:
                 *  where HTT SW has deposited empty buffers
                 *  This is allocated in consistent mem, so that the FW can read
                 *  this variable, and program the HW's FW_IDX reg with the value
                 *  of this shadow register
                 * With reorder offload:
                 *  points to the end of the available free rx buffers
                 */
                struct {
                        uint32_t *vaddr;
                        uint32_t paddr;
                        cdf_dma_mem_context(memctx);
                } alloc_idx;

                /* sw_rd_idx -
                 * where HTT SW has processed bufs filled by rx MAC DMA */
                struct {
                        unsigned msdu_desc;
                        unsigned msdu_payld;
                } sw_rd_idx;

                /*
                 * refill_retry_timer - timer triggered when the ring is not
                 * refilled to the level expected
                 */
                cdf_softirq_timer_t refill_retry_timer;

                /*
                 * refill_ref_cnt - ref cnt for Rx buffer replenishment - this
                 * variable is used to guarantee that only one thread tries
                 * to replenish Rx ring.
                 */
                cdf_atomic_t refill_ref_cnt;
#ifdef DEBUG_DMA_DONE
                uint32_t dbg_initial_msdu_payld;
                uint32_t dbg_mpdu_range;
                uint32_t dbg_mpdu_count;
                uint32_t dbg_ring_idx;
                uint32_t dbg_refill_cnt;
                uint32_t dbg_sync_success;
#endif
#ifdef HTT_RX_RESTORE
                int rx_reset;
                uint8_t htt_rx_restore;
#endif
                struct htt_rx_hash_bucket *hash_table;
                uint32_t listnode_offset;
        } rx_ring;
        long rx_fw_desc_offset;
        int rx_mpdu_range_offset_words;
        int rx_ind_msdu_byte_idx;

        struct {
                int size;       /* of each HTT tx desc */
                uint16_t pool_elems;
                uint16_t alloc_cnt;
                struct cdf_mem_multi_page_t desc_pages;
                uint32_t *freelist;
                cdf_dma_mem_context(memctx);
        } tx_descs;
#if defined(HELIUMPLUS_PADDR64)
        struct {
                int size;       /* of each Fragment/MSDU-Ext descriptor */
                int pool_elems;
                struct cdf_mem_multi_page_t desc_pages;
                cdf_dma_mem_context(memctx);
        } frag_descs;
#endif /* defined(HELIUMPLUS_PADDR64) */

        int download_len;
        void (*tx_send_complete_part2)(void *pdev, A_STATUS status,
                                       cdf_nbuf_t msdu, uint16_t msdu_id);

        HTT_TX_MUTEX_TYPE htt_tx_mutex;

        struct {
                int htc_err_cnt;
        } stats;

        struct htt_tx_mgmt_desc_ctxt tx_mgmt_desc_ctxt;
        struct targetdef_s *targetdef;
        struct ce_reg_def *target_ce_def;

        struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
        struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
#ifdef DEBUG_RX_RING_BUFFER
        struct rx_buf_debug *rx_buff_list;
        int rx_buff_index;
#endif
};
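
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * rx_ring.size is a power of two and size_mask == size - 1, so ring
 * indices wrap with a mask rather than a modulo.
 */
static inline unsigned htt_rx_ring_idx_next(struct htt_pdev_t *pdev,
                                            unsigned idx)
{
        return (idx + 1) & pdev->rx_ring.size_mask;
}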

#define HTT_EPID_GET(_htt_pdev_hdl)  \
        (((struct htt_pdev_t *)(_htt_pdev_hdl))->htc_endpoint)

#if defined(HELIUMPLUS_PADDR64)
#define HTT_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major == (x)) && \
                                 ((pdev)->wifi_ip_ver.minor == (y)))

#define HTT_SET_WIFI_IP(pdev, x, y) \
        do { \
                (pdev)->wifi_ip_ver.major = (x); \
                (pdev)->wifi_ip_ver.minor = (y); \
        } while (0)
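
/*
 * Usage sketch (illustrative only; not from the original header): record
 * the target's WiFi IP version once, then gate version-specific behaviour
 * on it later.  handle_this_wifi_ip_version() is a hypothetical name.
 *
 *      HTT_SET_WIFI_IP(pdev, major, minor);
 *      if (HTT_WIFI_IP(pdev, major, minor))
 *              handle_this_wifi_ip_version(pdev);
 */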
#endif /* defined(HELIUMPLUS_PADDR64) */

#endif /* _HTT_TYPES__H_ */