blob: ac850c0b711a8090b2a0d4c536d171714a5db06c [file] [log] [blame]
Kalle Valo5e3dd152013-06-12 20:52:10 +03001/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _CE_H_
19#define _CE_H_
20
21#include "hif.h"
22
23
24/* Maximum number of Copy Engine's supported */
25#define CE_COUNT_MAX 8
26#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
27
28/* Descriptor rings must be aligned to this boundary */
29#define CE_DESC_RING_ALIGN 8
30#define CE_SENDLIST_ITEMS_MAX 12
31#define CE_SEND_FLAG_GATHER 0x00010000
32
33/*
34 * Copy Engine support: low-level Target-side Copy Engine API.
35 * This is a hardware access layer used by code that understands
36 * how to use copy engines.
37 */
38
Michal Kazior2aa39112013-08-27 13:08:02 +020039struct ath10k_ce_pipe;
Kalle Valo5e3dd152013-06-12 20:52:10 +030040
41
Kalle Valo5e3dd152013-06-12 20:52:10 +030042#define CE_DESC_FLAGS_GATHER (1 << 0)
43#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
44#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
45#define CE_DESC_FLAGS_META_DATA_LSB 3
46
/*
 * Copy Engine descriptor as it sits in DMA-coherent ring memory.
 * All fields are little-endian; layout is fixed by the hardware,
 * so fields must not be reordered or resized.
 */
struct ce_desc {
	__le32 addr;	/* CE-space address of the data buffer */
	__le16 nbytes;	/* transfer length in bytes */
	__le16 flags;	/* %CE_DESC_FLAGS_ */
};
52
/* Copy Engine Ring internal state */
struct ce_ring_state {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	/* nentries - 1; mask used for cheap modulo ring arithmetic
	 * (see CE_RING_DELTA / CE_RING_IDX_INCR below)
	 */
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	/* cached copy */
	unsigned int write_index;
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	/* cached copy */
	unsigned int hw_index;

	/* Start of DMA-coherent area reserved for descriptors */
	/* Host address space */
	void *base_addr_owner_space_unaligned;
	/* CE address space */
	u32 base_addr_ce_space_unaligned;

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	/* Host address space */
	void *base_addr_owner_space;

	/* CE address space */
	u32 base_addr_ce_space;
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	void *shadow_base_unaligned;
	struct ce_desc *shadow_base;

	/* Array of nentries caller-supplied per-transfer context
	 * pointers, one slot per ring entry
	 */
	void **per_transfer_context;
};
111
/* Per-pipe Copy Engine state shared between the CE layer and its users */
struct ath10k_ce_pipe {
	struct ath10k *ar;	/* owning device instance */
	unsigned int id;	/* copy engine number */

	unsigned int attr_flags;	/* CE_ATTR_* values */

	/* CE control register address for this engine */
	u32 ctrl_addr;

	/* Completion callback for buffers sent on this pipe
	 * (registered via ath10k_ce_send_cb_register())
	 */
	void (*send_cb) (struct ath10k_ce_pipe *ce_state,
			 void *per_transfer_send_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id);
	/* Completion callback for buffers received on this pipe
	 * (registered via ath10k_ce_recv_cb_register())
	 */
	void (*recv_cb) (struct ath10k_ce_pipe *ce_state,
			 void *per_transfer_recv_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id,
			 unsigned int flags);

	/* Max source send size; also the minimum destination buffer size */
	unsigned int src_sz_max;
	struct ce_ring_state *src_ring;
	struct ce_ring_state *dest_ring;
};
136
/* One buffer fragment within a gather sendlist */
struct ce_sendlist_item {
	/* e.g. buffer or desc list */
	dma_addr_t data;
	union {
		/* simple buffer */
		unsigned int nbytes;
		/* Rx descriptor list */
		unsigned int ndesc;
	} u;
	/* externally-specified flags; OR-ed with internal flags */
	u32 flags;
};
149
/* List of buffer fragments sent as one gather transfer
 * (see ath10k_ce_sendlist_send())
 */
struct ce_sendlist {
	/* number of valid entries in item[] */
	unsigned int num_items;
	struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
};
154
155/* Copy Engine settable attributes */
156struct ce_attr;
157
158/*==================Send====================*/
159
160/* ath10k_ce_send flags */
161#define CE_SEND_FLAG_BYTE_SWAP 1
162
163/*
164 * Queue a source buffer to be sent to an anonymous destination buffer.
165 * ce - which copy engine to use
166 * buffer - address of buffer
167 * nbytes - number of bytes to send
168 * transfer_id - arbitrary ID; reflected to destination
169 * flags - CE_SEND_FLAG_* values
170 * Returns 0 on success; otherwise an error status.
171 *
172 * Note: If no flags are specified, use CE's default data swap mode.
173 *
174 * Implementation note: pushes 1 buffer to Source ring
175 */
Michal Kazior2aa39112013-08-27 13:08:02 +0200176int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300177 void *per_transfer_send_context,
178 u32 buffer,
179 unsigned int nbytes,
180 /* 14 bits */
181 unsigned int transfer_id,
182 unsigned int flags);
183
Michal Kazior2aa39112013-08-27 13:08:02 +0200184void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
185 void (*send_cb)(struct ath10k_ce_pipe *ce_state,
186 void *transfer_context,
187 u32 buffer,
188 unsigned int nbytes,
189 unsigned int transfer_id),
Kalle Valo5e3dd152013-06-12 20:52:10 +0300190 int disable_interrupts);
191
192/* Append a simple buffer (address/length) to a sendlist. */
193void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
194 u32 buffer,
195 unsigned int nbytes,
196 /* OR-ed with internal flags */
197 u32 flags);
198
199/*
200 * Queue a "sendlist" of buffers to be sent using gather to a single
201 * anonymous destination buffer
202 * ce - which copy engine to use
203 * sendlist - list of simple buffers to send using gather
204 * transfer_id - arbitrary ID; reflected to destination
205 * Returns 0 on success; otherwise an error status.
206 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
208 */
Michal Kazior2aa39112013-08-27 13:08:02 +0200209int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300210 void *per_transfer_send_context,
211 struct ce_sendlist *sendlist,
212 /* 14 bits */
213 unsigned int transfer_id);
214
215/*==================Recv=======================*/
216
217/*
218 * Make a buffer available to receive. The buffer must be at least of a
219 * minimal size appropriate for this copy engine (src_sz_max attribute).
220 * ce - which copy engine to use
221 * per_transfer_recv_context - context passed back to caller's recv_cb
222 * buffer - address of buffer in CE space
223 * Returns 0 on success; otherwise an error status.
224 *
 * Implementation note: Pushes a buffer to Dest ring.
226 */
Michal Kazior2aa39112013-08-27 13:08:02 +0200227int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300228 void *per_transfer_recv_context,
229 u32 buffer);
230
Michal Kazior2aa39112013-08-27 13:08:02 +0200231void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
232 void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
233 void *transfer_context,
234 u32 buffer,
235 unsigned int nbytes,
236 unsigned int transfer_id,
237 unsigned int flags));
Kalle Valo5e3dd152013-06-12 20:52:10 +0300238
239/* recv flags */
240/* Data is byte-swapped */
241#define CE_RECV_FLAG_SWAPPED 1
242
243/*
244 * Supply data for the next completed unprocessed receive descriptor.
245 * Pops buffer from Dest ring.
246 */
Michal Kazior2aa39112013-08-27 13:08:02 +0200247int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300248 void **per_transfer_contextp,
249 u32 *bufferp,
250 unsigned int *nbytesp,
251 unsigned int *transfer_idp,
252 unsigned int *flagsp);
253/*
254 * Supply data for the next completed unprocessed send descriptor.
255 * Pops 1 completed send buffer from Source ring.
256 */
Michal Kazior2aa39112013-08-27 13:08:02 +0200257int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300258 void **per_transfer_contextp,
259 u32 *bufferp,
260 unsigned int *nbytesp,
261 unsigned int *transfer_idp);
262
263/*==================CE Engine Initialization=======================*/
264
265/* Initialize an instance of a CE */
Michal Kazior2aa39112013-08-27 13:08:02 +0200266struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300267 unsigned int ce_id,
268 const struct ce_attr *attr);
269
270/*==================CE Engine Shutdown=======================*/
271/*
272 * Support clean shutdown by allowing the caller to revoke
273 * receive buffers. Target DMA must be stopped before using
274 * this API.
275 */
Michal Kazior2aa39112013-08-27 13:08:02 +0200276int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300277 void **per_transfer_contextp,
278 u32 *bufferp);
279
280/*
281 * Support clean shutdown by allowing the caller to cancel
282 * pending sends. Target DMA must be stopped before using
283 * this API.
284 */
Michal Kazior2aa39112013-08-27 13:08:02 +0200285int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300286 void **per_transfer_contextp,
287 u32 *bufferp,
288 unsigned int *nbytesp,
289 unsigned int *transfer_idp);
290
Michal Kazior2aa39112013-08-27 13:08:02 +0200291void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300292
293/*==================CE Interrupt Handlers====================*/
294void ath10k_ce_per_engine_service_any(struct ath10k *ar);
295void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
296void ath10k_ce_disable_interrupts(struct ath10k *ar);
297
298/* ce_attr.flags values */
299/* Use NonSnooping PCIe accesses? */
300#define CE_ATTR_NO_SNOOP 1
301
302/* Byte swap data words */
303#define CE_ATTR_BYTE_SWAP_DATA 2
304
305/* Swizzle descriptors? */
306#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
307
308/* no interrupt on copy completion */
309#define CE_ATTR_DIS_INTR 8
310
/* Attributes of an instance of a Copy Engine, supplied by the caller
 * to ath10k_ce_init() to configure one engine
 */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;
};
328
329/*
330 * When using sendlist_send to transfer multiple buffer fragments, the
331 * transfer context of each fragment, except last one, will be filled
332 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
333 * each fragment done with send and the transfer context would be
334 * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
335 * status of a send completion.
336 */
337#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
338
339#define SR_BA_ADDRESS 0x0000
340#define SR_SIZE_ADDRESS 0x0004
341#define DR_BA_ADDRESS 0x0008
342#define DR_SIZE_ADDRESS 0x000c
343#define CE_CMD_ADDRESS 0x0018
344
345#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
346#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
347#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
348#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
349 (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
350 CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
351
352#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
353#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
354#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
355#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
356 (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
357 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
358#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
359 (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
360 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
361
362#define CE_CTRL1_DMAX_LENGTH_MSB 15
363#define CE_CTRL1_DMAX_LENGTH_LSB 0
364#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
365#define CE_CTRL1_DMAX_LENGTH_GET(x) \
366 (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
367#define CE_CTRL1_DMAX_LENGTH_SET(x) \
368 (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
369
370#define CE_CTRL1_ADDRESS 0x0010
371#define CE_CTRL1_HW_MASK 0x0007ffff
372#define CE_CTRL1_SW_MASK 0x0007ffff
373#define CE_CTRL1_HW_WRITE_MASK 0x00000000
374#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
375#define CE_CTRL1_RSTMASK 0xffffffff
376#define CE_CTRL1_RESET 0x00000080
377
378#define CE_CMD_HALT_STATUS_MSB 3
379#define CE_CMD_HALT_STATUS_LSB 3
380#define CE_CMD_HALT_STATUS_MASK 0x00000008
381#define CE_CMD_HALT_STATUS_GET(x) \
382 (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
383#define CE_CMD_HALT_STATUS_SET(x) \
384 (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
385#define CE_CMD_HALT_STATUS_RESET 0
386#define CE_CMD_HALT_MSB 0
387#define CE_CMD_HALT_MASK 0x00000001
388
389#define HOST_IE_COPY_COMPLETE_MSB 0
390#define HOST_IE_COPY_COMPLETE_LSB 0
391#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
392#define HOST_IE_COPY_COMPLETE_GET(x) \
393 (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
394#define HOST_IE_COPY_COMPLETE_SET(x) \
395 (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
396#define HOST_IE_COPY_COMPLETE_RESET 0
397#define HOST_IE_ADDRESS 0x002c
398
399#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
400#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
401#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
402#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
403#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
404#define HOST_IS_ADDRESS 0x0030
405
406#define MISC_IE_ADDRESS 0x0034
407
408#define MISC_IS_AXI_ERR_MASK 0x00000400
409
410#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
411#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
412#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
413#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
414#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
415
416#define MISC_IS_ADDRESS 0x0038
417
418#define SR_WR_INDEX_ADDRESS 0x003c
419
420#define DST_WR_INDEX_ADDRESS 0x0040
421
422#define CURRENT_SRRI_ADDRESS 0x0044
423
424#define CURRENT_DRRI_ADDRESS 0x0048
425
426#define SRC_WATERMARK_LOW_MSB 31
427#define SRC_WATERMARK_LOW_LSB 16
428#define SRC_WATERMARK_LOW_MASK 0xffff0000
429#define SRC_WATERMARK_LOW_GET(x) \
430 (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
431#define SRC_WATERMARK_LOW_SET(x) \
432 (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
433#define SRC_WATERMARK_LOW_RESET 0
434#define SRC_WATERMARK_HIGH_MSB 15
435#define SRC_WATERMARK_HIGH_LSB 0
436#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
437#define SRC_WATERMARK_HIGH_GET(x) \
438 (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
439#define SRC_WATERMARK_HIGH_SET(x) \
440 (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
441#define SRC_WATERMARK_HIGH_RESET 0
442#define SRC_WATERMARK_ADDRESS 0x004c
443
444#define DST_WATERMARK_LOW_LSB 16
445#define DST_WATERMARK_LOW_MASK 0xffff0000
446#define DST_WATERMARK_LOW_SET(x) \
447 (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
448#define DST_WATERMARK_LOW_RESET 0
449#define DST_WATERMARK_HIGH_MSB 15
450#define DST_WATERMARK_HIGH_LSB 0
451#define DST_WATERMARK_HIGH_MASK 0x0000ffff
452#define DST_WATERMARK_HIGH_GET(x) \
453 (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
454#define DST_WATERMARK_HIGH_SET(x) \
455 (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
456#define DST_WATERMARK_HIGH_RESET 0
457#define DST_WATERMARK_ADDRESS 0x0050
458
459
460static inline u32 ath10k_ce_base_address(unsigned int ce_id)
461{
462 return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
463}
464
465#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
466 HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
467 HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
468 HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
469
470#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
471 MISC_IS_DST_ADDR_ERR_MASK | \
472 MISC_IS_SRC_LEN_ERR_MASK | \
473 MISC_IS_DST_MAX_LEN_VIO_MASK | \
474 MISC_IS_DST_RING_OVERFLOW_MASK | \
475 MISC_IS_SRC_RING_OVERFLOW_MASK)
476
477#define CE_SRC_RING_TO_DESC(baddr, idx) \
478 (&(((struct ce_desc *)baddr)[idx]))
479
480#define CE_DEST_RING_TO_DESC(baddr, idx) \
481 (&(((struct ce_desc *)baddr)[idx]))
482
483/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
484#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
485 (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
486
487#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
488
489#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
490#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
491#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
492 (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
493 CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
494#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
495
496#define CE_INTERRUPT_SUMMARY(ar) \
497 CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
498 ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
499 CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
500
501#endif /* _CE_H_ */