/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
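
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * overall host-side flow using the API below. The pipe number, contexts,
 * DMA addresses, lengths and callback names are placeholders; the real
 * wiring is done by the PCI/HIF layer.
 *
 *	struct ath10k_ce_pipe *pipe;
 *
 *	pipe = ath10k_ce_init(ar, ce_id, &attr);
 *	ath10k_ce_send_cb_register(pipe, my_send_done, 0);
 *	ath10k_ce_recv_cb_register(pipe, my_recv_done);
 *
 *	ath10k_ce_recv_buf_enqueue(pipe, my_rx_ctx, rx_paddr);
 *	ath10k_ce_send(pipe, my_tx_ctx, tx_paddr, nbytes, transfer_id, 0);
 */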

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32((ar),
					   (ce_ctrl_addr) + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							 u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
				 void *per_transfer_context,
				 u32 buffer,
				 unsigned int nbytes,
				 unsigned int transfer_id,
				 unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -EIO;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags = __cpu_to_le16(desc_flags);

	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	ath10k_pci_sleep(ar);
	return ret;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
				unsigned int nbytes, u32 flags)
{
	unsigned int num_items = sendlist->num_items;
	struct ce_sendlist_item *item;

	item = &sendlist->item[num_items];
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	sendlist->num_items++;
}

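/*
 * Illustrative sketch (hypothetical caller, not part of this file): how a
 * gather send might be assembled with the sendlist helpers. The
 * hdr_paddr/payload_paddr DMA addresses, lengths, contexts and transfer_id
 * are placeholders supplied by the caller.
 *
 *	struct ce_sendlist sendlist = {};
 *
 *	ath10k_ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0);
 *	ath10k_ce_sendlist_buf_add(&sendlist, payload_paddr, payload_len, 0);
 *	ret = ath10k_ce_sendlist_send(ce_pipe, my_tx_ctx, &sendlist,
 *				      transfer_id);
 */
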
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
			    void *per_transfer_context,
			    struct ce_sendlist *sendlist,
			    unsigned int transfer_id)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_sendlist_item *item;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sendlist->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	int i, delta, ret = -ENOMEM;

	spin_lock_bh(&ar_pci->ce_lock);

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

	if (delta >= num_items) {
		/*
		 * Handle all but the last item uniformly.
		 */
		for (i = 0; i < num_items - 1; i++) {
			item = &sendlist->item[i];
			ret = ath10k_ce_send_nolock(ce_state,
						    CE_SENDLIST_ITEM_CTXT,
						    (u32) item->data,
						    item->u.nbytes, transfer_id,
						    item->flags |
						    CE_SEND_FLAG_GATHER);
			if (ret)
				ath10k_warn("CE send failed for item: %d\n", i);
		}
		/*
		 * Provide valid context pointer for final item.
		 */
		item = &sendlist->item[i];
		ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
					    (u32) item->data, item->u.nbytes,
					    transfer_id, item->flags);
		if (ret)
			ath10k_warn("CE send failed for last item: %d\n", i);
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

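/*
 * Illustrative sketch (hypothetical, not part of this file): keeping the
 * destination ring topped up with "anonymous receive buffers". The skb
 * allocation and DMA mapping details (buf_sz, ce_pipe) are placeholders
 * for whatever the HIF layer actually uses.
 *
 *	struct sk_buff *skb = dev_alloc_skb(buf_sz);
 *	dma_addr_t paddr = dma_map_single(ar->dev, skb->data, buf_sz,
 *					  DMA_FROM_DEVICE);
 *
 *	ret = ath10k_ce_recv_buf_enqueue(ce_pipe, skb, paddr);
 *	if (ret)
 *		dma_unmap_single(ar->dev, paddr, buf_sz, DMA_FROM_DEVICE);
 *	(-EIO here means the ring was full; freeing/retrying is up to the
 *	 caller)
 */
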
/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* sanity */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if ((read_index == sw_index) || (read_index == 0xffffffff))
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	void *transfer_context;
	u32 buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	if (ce_state->recv_cb) {
		/*
		 * Pop completed recv buffers and call the registered
		 * recv callback for each
		 */
		while (ath10k_ce_completed_recv_next_nolock(ce_state,
							    &transfer_context,
							    &buf, &nbytes,
							    &id, &flags) == 0) {
			spin_unlock_bh(&ar_pci->ce_lock);
			ce_state->recv_cb(ce_state, transfer_context, buf,
					  nbytes, id, flags);
			spin_lock_bh(&ar_pci->ce_lock);
		}
	}

	if (ce_state->send_cb) {
		/*
		 * Pop completed send buffers and call the registered
		 * send callback for each
		 */
		while (ath10k_ce_completed_send_next_nolock(ce_state,
							    &transfer_context,
							    &buf,
							    &nbytes,
							    &id) == 0) {
			spin_unlock_bh(&ar_pci->ce_lock);
			ce_state->send_cb(ce_state, transfer_context,
					  buf, nbytes, id);
			spin_lock_bh(&ar_pci->ce_lock);
		}
	}

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
	ath10k_pci_sleep(ar);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id, ret;
	u32 intr_summary;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);
}

void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
		struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
		u32 ctrl_addr = ce_state->ctrl_addr;

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
	}
	ath10k_pci_sleep(ar);
}

void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*send_cb)(struct ath10k_ce_pipe *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
						unsigned int transfer_id),
				int disable_interrupts)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->send_cb = send_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
	spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
						unsigned int transfer_id,
						unsigned int flags))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = recv_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}

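/*
 * Illustrative sketch (hypothetical callbacks, not part of this file): how
 * the completion callbacks above might be wired up. The callback names and
 * bodies are placeholders; the real handlers live in the PCI/HIF layer.
 *
 *	static void my_send_done(struct ath10k_ce_pipe *pipe, void *ctx,
 *				 u32 buffer, unsigned int nbytes,
 *				 unsigned int transfer_id)
 *	{
 *		(reclaim the DMA buffer described by ctx/buffer)
 *	}
 *
 *	ath10k_ce_send_cb_register(ce_pipe, my_send_done, 0);
 *	ath10k_ce_recv_cb_register(ce_pipe, my_recv_done);
 */
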
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   struct ath10k_ce_pipe *ce_state,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries = attr->src_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->src_ring) {
		WARN_ON(ce_state->src_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
	src_ring = ce_state->src_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	ath10k_pci_wake(ar);
	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;
	ath10k_pci_sleep(ar);

	src_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		pci_free_consistent(ar_pci->pdev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    src_ring->base_addr_owner_space,
				    src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_pci_wake(ar);
	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    struct ath10k_ce_pipe *ce_state,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries = attr->dest_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->dest_ring) {
		WARN_ON(ce_state->dest_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
	dest_ring = ce_state->dest_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	ath10k_pci_wake(ar);
	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;
	ath10k_pci_sleep(ar);

	dest_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->dest_ring);
		ce_state->dest_ring = NULL;
		return -ENOMEM;
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Initialize the memory to 0 so that stale garbage data
	 * cannot crash the system while firmware is being downloaded.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_pci_wake(ar);
	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
	ath10k_pci_sleep(ar);

	return 0;
}

static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
						   unsigned int ce_id,
						   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	spin_lock_bh(&ar_pci->ce_lock);

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ctrl_addr;
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	spin_unlock_bh(&ar_pci->ce_lock);

	return ce_state;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				      unsigned int ce_id,
				      const struct ce_attr *attr)
{
	struct ath10k_ce_pipe *ce_state;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	int ret;

	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
	if (!ce_state) {
		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
		return NULL;
	}

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
	}

	/* Enable CE error interrupts */
	ath10k_pci_wake(ar);
	ath10k_ce_error_intr_enable(ar, ctrl_addr);
	ath10k_pci_sleep(ar);

	return ce_state;
}

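/*
 * Illustrative sketch (hypothetical attributes, not part of this file): the
 * kind of ce_attr a caller might hand to ath10k_ce_init(). The entry counts
 * and buffer size are placeholders; the real per-pipe attribute tables live
 * in the PCI layer.
 *
 *	static const struct ce_attr example_ce_attr = {
 *		.flags = 0,
 *		.src_nentries = 16,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 16,
 *	};
 *
 *	ce_pipe = ath10k_ce_init(ar, ce_id, &example_ce_attr);
 *	if (!ce_pipe)
 *		(handle initialization failure)
 */
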
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		pci_free_consistent(ar_pci->pdev,
				    (ce_state->src_ring->nentries *
				     sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    ce_state->src_ring->base_addr_owner_space,
				    ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		pci_free_consistent(ar_pci->pdev,
				    (ce_state->dest_ring->nentries *
				     sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    ce_state->dest_ring->base_addr_owner_space,
				    ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}