/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Copyright (C) 2006 Applied Data Systems
 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

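/*
 * Usage sketch (not part of this driver): a client typically requests one of
 * these channels through the generic dmaengine API and passes a
 * &struct ep93xx_dma_data via the filter function so that it ends up in
 * @chan->private. The filter below is hypothetical and heavily simplified
 * (a real one should also check that the channel belongs to this driver):
 *
 *	static bool my_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		chan->private = filter_param;
 *		return true;
 *	}
 *
 *	struct ep93xx_dma_data data = {
 *		.port		= EP93XX_DMA_SSP,
 *		.direction	= DMA_MEM_TO_DEV,
 *		.name		= "ssp-tx",
 *	};
 *	struct dma_chan *chan;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &data);
 */
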
Mika Westerberg5fa29a12011-05-29 13:10:02 +030033/* M2P registers */
34#define M2P_CONTROL 0x0000
35#define M2P_CONTROL_STALLINT BIT(0)
36#define M2P_CONTROL_NFBINT BIT(1)
37#define M2P_CONTROL_CH_ERROR_INT BIT(3)
38#define M2P_CONTROL_ENABLE BIT(4)
39#define M2P_CONTROL_ICE BIT(6)
40
41#define M2P_INTERRUPT 0x0004
42#define M2P_INTERRUPT_STALL BIT(0)
43#define M2P_INTERRUPT_NFB BIT(1)
44#define M2P_INTERRUPT_ERROR BIT(3)
45
46#define M2P_PPALLOC 0x0008
47#define M2P_STATUS 0x000c
48
49#define M2P_MAXCNT0 0x0020
50#define M2P_BASE0 0x0024
51#define M2P_MAXCNT1 0x0030
52#define M2P_BASE1 0x0034
53
54#define M2P_STATE_IDLE 0
55#define M2P_STATE_STALL 1
56#define M2P_STATE_ON 2
57#define M2P_STATE_NEXT 3
58
59/* M2M registers */
60#define M2M_CONTROL 0x0000
61#define M2M_CONTROL_DONEINT BIT(2)
62#define M2M_CONTROL_ENABLE BIT(3)
63#define M2M_CONTROL_START BIT(4)
64#define M2M_CONTROL_DAH BIT(11)
65#define M2M_CONTROL_SAH BIT(12)
66#define M2M_CONTROL_PW_SHIFT 9
67#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
68#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
69#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
70#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
71#define M2M_CONTROL_TM_SHIFT 13
72#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
73#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
Rafal Prylowski2b3c83e2012-04-19 11:19:00 +020074#define M2M_CONTROL_NFBINT BIT(21)
Mika Westerberg5fa29a12011-05-29 13:10:02 +030075#define M2M_CONTROL_RSS_SHIFT 22
76#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
77#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
78#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
79#define M2M_CONTROL_NO_HDSK BIT(24)
80#define M2M_CONTROL_PWSC_SHIFT 25
81
82#define M2M_INTERRUPT 0x0004
Rafal Prylowski2b3c83e2012-04-19 11:19:00 +020083#define M2M_INTERRUPT_MASK 6
84
85#define M2M_STATUS 0x000c
86#define M2M_STATUS_CTL_SHIFT 1
87#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
88#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
89#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
90#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
91#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
92#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
93#define M2M_STATUS_BUF_SHIFT 4
94#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
95#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
96#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
97#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
98#define M2M_STATUS_DONE BIT(6)
Mika Westerberg5fa29a12011-05-29 13:10:02 +030099
100#define M2M_BCR0 0x0010
101#define M2M_BCR1 0x0014
102#define M2M_SAR_BASE0 0x0018
103#define M2M_SAR_BASE1 0x001c
104#define M2M_DAR_BASE0 0x002c
105#define M2M_DAR_BASE1 0x0030
106
107#define DMA_MAX_CHAN_BYTES 0xffff
108#define DMA_MAX_CHAN_DESCRIPTORS 32
109
struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before the slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};

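/*
 * A rough picture of the lists described above: a queued transaction keeps
 * its chained descriptors on the head's @tx_list, and moving it to @active
 * flattens everything into one list (see ep93xx_dma_set_active() below):
 *
 *	queue:  first ------------------> first' -> ...
 *	          \-> tx_list: d1 -> d2
 *
 *	active: first -> d1 -> d2
 */
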
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
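
/*
 * A note on the double buffering above (our reading of the code, not a
 * statement from the User's Guide): m2p_hw_submit() programs the first
 * descriptor into buffer 0 and, if another descriptor is already available,
 * buffer 1 as well. The NFB ("next frame buffer") interrupt is only enabled
 * when both buffers are in use, so that m2p_hw_interrupt() can refill the
 * just-consumed buffer while the other one is still being transferred.
 */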

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize	: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, basing
	 * the decision on the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get the DONE interrupt
		 * then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes EP93xx M2M DMA asserts DONE interrupt while the
 * DMA channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only the DONE bit is set
 * could stop the currently running DMA transfer. To avoid this, we use the
 * Buffer FSM and Control FSM to check the current state of the DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with the DMA channel state, determines the action to take in the
	 * interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the M2M DMA Buffer FSM and Control FSM to check the current
	 * state of the DMA channel. Using the DONE and NFB bits from the
	 * channel status register or bits from the channel interrupt register
	 * is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when the Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the Buffer FSM is in DMA_NO_BUF state
	 * and the Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
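
/*
 * Client-side sketch of the submit path above (illustrative only; "desc"
 * would come from one of the prep functions below). dmaengine_submit() ends
 * up in ep93xx_dma_tx_submit() and dma_async_issue_pending() flushes the
 * pending queue:
 *
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	...
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */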

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
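
/*
 * Worked example for the chunking loop above: a 100000 byte memcpy with
 * DMA_MAX_CHAN_BYTES == 0xffff (65535) yields two descriptors, one of
 * 65535 bytes and one of 100000 - 65535 = 34465 bytes; the second one is
 * linked on the first one's @tx_list and the whole chain is returned as a
 * single dmaengine descriptor.
 */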

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
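
/*
 * Illustrative slave usage (a sketch; error handling and unmapping omitted).
 * A single-entry scatterlist of at most DMA_MAX_CHAN_BYTES is assumed, "buf"
 * and "len" being hypothetical client-side names:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
 *	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
 *				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 */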

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
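
/*
 * Example period math for the loop above: an audio-style ring buffer with
 * buf_len == 16384 and period_len == 4096 is split into four 4096 byte
 * descriptors. In cyclic mode ep93xx_dma_advance_active() keeps rotating
 * over them forever and the callback runs after each period. A hedged usage
 * sketch ("buf_phys" is a hypothetical DMA mapped buffer address):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 16384, 4096,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */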

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
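
/*
 * Example configuration matching the checks above (a sketch; the register
 * address macros are hypothetical). For an M2M SSP transmit channel with a
 * 16-bit FIFO, DMA_SLAVE_BUSWIDTH_2_BYTES maps to M2M_CONTROL_PW_16:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= SSP_PHYS_BASE + SSP_DR_OFFSET,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */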

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");