/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it may
 * seem, than should be needed. These are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
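
/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * HIF-level caller might drive a pipe with this API. The function, its
 * arguments and the DMA-mapped addresses are hypothetical; it is kept under
 * "#if 0" purely to document the calling convention described above.
 */
#if 0
static int ce_usage_sketch(struct ath10k_ce_pipe *tx_pipe,
			   struct ath10k_ce_pipe *rx_pipe,
			   void *tx_ctx, u32 tx_paddr, unsigned int tx_len,
			   void *rx_ctx, u32 rx_paddr)
{
	int ret;

	/* Destination side: keep anonymous receive buffers posted. */
	ret = ath10k_ce_recv_buf_enqueue(rx_pipe, rx_ctx, rx_paddr);
	if (ret)
		return ret;

	/* Source side: post one descriptor (transfer_id 0, no flags). */
	ret = ath10k_ce_send(tx_pipe, tx_ctx, tx_paddr, tx_len, 0, 0);
	if (ret)
		return ret;

	/*
	 * Completions are reaped from the per-engine interrupt service via
	 * callbacks registered with ath10k_ce_send_cb_register() and
	 * ath10k_ce_recv_cb_register(), defined later in this file.
	 */
	return 0;
}
#endif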

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
							u32 ce_ctrl_addr,
							unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32((ar),
					   (ce_ctrl_addr) + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							 u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							 u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr & ~CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}


/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: buffer larger than max (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags = __cpu_to_le16(desc_flags);

	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	ath10k_pci_sleep(ar);
	return ret;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int delta;

	spin_lock_bh(&ar_pci->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ar_pci->ce_lock);

	return delta;
}

int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* sanity */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if ((read_index == sw_index) || (read_index == 0xffffffff))
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
	ath10k_pci_sleep(ar);
}

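/*
 * Illustrative only (not part of the driver): a sketch of what a recv
 * callback registered with ath10k_ce_recv_cb_register() typically does when
 * the service routine above invokes it -- drain every completed destination
 * descriptor. The callback name and the "process buffer" step are
 * placeholders for the real HIF/HTC handling.
 */
#if 0
static void example_ce_recv_cb(struct ath10k_ce_pipe *ce_state)
{
	void *transfer_ctx;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_ctx,
					     &ce_data, &nbytes,
					     &transfer_id, &flags) == 0) {
		/* ...unmap and process the buffer, then re-post one... */
	}
}
#endif
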
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */

void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id, ret;
	u32 intr_summary;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	ath10k_pci_sleep(ar);

	return 0;
}

void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*send_cb)(struct ath10k_ce_pipe *),
				int disable_interrupts)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->send_cb = send_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
	spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*recv_cb)(struct ath10k_ce_pipe *))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = recv_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   struct ath10k_ce_pipe *ce_state,
				   const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries = attr->src_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->src_ring) {
		WARN_ON(ce_state->src_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
	src_ring = ce_state->src_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		dma_free_coherent(ar->dev,
				  (nentries * sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  src_ring->base_addr_owner_space,
				  src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    struct ath10k_ce_pipe *ce_state,
				    const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries = attr->dest_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->dest_ring) {
		WARN_ON(ce_state->dest_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
	dest_ring = ce_state->dest_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	dest_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->dest_ring);
		ce_state->dest_ring = NULL;
		return -ENOMEM;
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Correctly initialize memory to 0 to prevent garbage
	 * data from crashing the system during firmware download.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
						   unsigned int ce_id,
						   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	spin_lock_bh(&ar_pci->ce_lock);

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ctrl_addr;
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	spin_unlock_bh(&ar_pci->ce_lock);

	return ce_state;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				      unsigned int ce_id,
				      const struct ce_attr *attr)
{
	struct ath10k_ce_pipe *ce_state;
	int ret;

	/*
	 * Make sure there are enough CE ring buffer entries for HTT TX to
	 * avoid additional TX locking checks.
	 *
	 * For lack of a better place, do the check here.
	 */
	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ret = ath10k_pci_wake(ar);
	if (ret)
		return NULL;

	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
	if (!ce_state) {
		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
		goto out;
	}

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			ce_state = NULL;
			goto out;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			ce_state = NULL;
			goto out;
		}
	}

out:
	ath10k_pci_sleep(ar);
	return ce_state;
}

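/*
 * Illustrative only (not part of the driver): a sketch of how a pipe might
 * be described and brought up, assuming the struct ce_attr layout from ce.h
 * (flags, src_nentries, src_sz_max, dest_nentries). The real per-pipe
 * attributes live in the host CE configuration table in pci.c; the values
 * and the ce_id below are made up.
 */
#if 0
static const struct ce_attr example_attr = {
	.flags = 0,		/* no special CE attribute flags */
	.src_nentries = 16,	/* host->target source ring entries */
	.src_sz_max = 256,	/* largest nbytes accepted by ath10k_ce_send */
	.dest_nentries = 16,	/* target->host destination ring entries */
};

static struct ath10k_ce_pipe *example_bring_up_pipe(struct ath10k *ar)
{
	/* ce_id 0 is arbitrary here; real IDs come from the pipe config. */
	return ath10k_ce_init(ar, 0, &example_attr);
}
#endif
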
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}