/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Copyright (C) 2006 Applied Data Systems
 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before the slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the necessary channel configuration information. For memcpy channels this
 * must be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};
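
/*
 * Illustrative sketch, not part of the driver proper: a slave client is
 * expected to pass a struct ep93xx_dma_data through @chan.private, which is
 * conventionally done with a dma_request_channel() filter function. The
 * example_* names and the chosen port/direction are hypothetical.
 */
#if 0
static bool example_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	/* Only accept a channel whose hardware direction matches */
	if (data->direction == ep93xx_dma_chan_direction(chan)) {
		chan->private = data;
		return true;
	}
	return false;
}

static struct dma_chan *example_request_tx_channel(void)
{
	static struct ep93xx_dma_data data = {
		.port		= EP93XX_DMA_SSP,
		.direction	= DMA_MEM_TO_DEV,
		.name		= "example ssp tx",
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_dma_filter, &data);
}
#endif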

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next one in @edmac->active
 * and returns %true if there are still descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize	: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we then don't get the DONE
		 * interrupt.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For an M2M channel this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
/*
 * According to the EP93xx User's Guide, we should receive DONE interrupt when
 * all M2M DMA controller transactions complete normally. This is not always
 * the case - sometimes EP93xx M2M DMA asserts DONE interrupt while the DMA
 * channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only the DONE bit is set
 * could stop a currently running DMA transfer. To avoid this, we use the
 * Buffer FSM and Control FSM to check the current state of the DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_MEM_TO_DEV &&
				    data->direction != DMA_DEV_TO_MEM)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
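
/*
 * Illustrative sketch, not part of the driver proper: the M2M channels
 * advertise DMA_MEMCPY, so a client can copy between two bus addresses as
 * below; transfers larger than DMA_MAX_CHAN_BYTES are transparently split
 * into chained descriptors by ep93xx_dma_prep_dma_memcpy() above. The
 * example_* name is hypothetical.
 */
#if 0
static dma_cookie_t example_issue_memcpy(struct dma_chan *chan,
					 dma_addr_t dst, dma_addr_t src,
					 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return cookie;
}
#endif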

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t sg_len = sg_dma_len(sg);

		if (sg_len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 sg_len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = sg_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
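
/*
 * Illustrative sketch, not part of the driver proper: a typical slave client
 * first sets the device address and bus width with dmaengine_slave_config()
 * (handled by ep93xx_dma_slave_config() below) and then prepares and submits
 * a scatter-gather transfer. The example_* names are hypothetical.
 */
#if 0
static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, dma_addr_t dev_addr)
{
	struct dma_slave_config config = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= dev_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
	};
	struct dma_async_tx_descriptor *txd;
	int ret;

	ret = dmaengine_slave_config(chan, &config);
	if (ret)
		return ret;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif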

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 * @context: operation context (ignored)
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags,
			   void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
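
/*
 * Illustrative sketch, not part of the driver proper: a cyclic client (an
 * audio ring buffer, say) submits the whole buffer once and then gets its
 * callback after every elapsed period until it calls
 * dmaengine_terminate_all(). The example_* names are hypothetical, and the
 * dmaengine_prep_dma_cyclic() wrapper with a flags argument is assumed to be
 * available in the kernel being built against.
 */
#if 0
static void example_period_elapsed(void *param)
{
	/* Runs in tasklet context once per @period_len of data */
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = example_period_elapsed;
	txd->callback_param = chan;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif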

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	ret = dma_cookie_status(chan, cookie, state);
	spin_unlock_irqrestore(&edmac->lock, flags);

	return ret;
}
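
/*
 * Illustrative sketch, not part of the driver proper: the cookie returned at
 * submit time can be polled through the standard dmaengine helper, which ends
 * up in ep93xx_dma_tx_status() above. The function name is hypothetical and
 * the busy-wait is for illustration only.
 */
#if 0
static int example_wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	return status == DMA_SUCCESS ? 0 : -EIO;
}
#endif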

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");