/*
 * S3C24XX DMA handling
 *
 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
 *
 * based on amba-pl08x.c
 *
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
 * that can be routed to any of the 4 to 8 hardware channels.
 *
 * Therefore on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to handle all physical signals simultaneously,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * Open items:
 * - bursts
 */

#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MAX_DMA_CHANNELS	8

#define S3C24XX_DISRC			0x00
#define S3C24XX_DISRCC			0x04
#define S3C24XX_DISRCC_INC_INCREMENT	0
#define S3C24XX_DISRCC_INC_FIXED	BIT(0)
#define S3C24XX_DISRCC_LOC_AHB		0
#define S3C24XX_DISRCC_LOC_APB		BIT(1)

#define S3C24XX_DIDST			0x08
#define S3C24XX_DIDSTC			0x0c
#define S3C24XX_DIDSTC_INC_INCREMENT	0
#define S3C24XX_DIDSTC_INC_FIXED	BIT(0)
#define S3C24XX_DIDSTC_LOC_AHB		0
#define S3C24XX_DIDSTC_LOC_APB		BIT(1)
#define S3C24XX_DIDSTC_INT_TC0		0
#define S3C24XX_DIDSTC_INT_RELOAD	BIT(2)

#define S3C24XX_DCON			0x10

#define S3C24XX_DCON_TC_MASK		0xfffff
#define S3C24XX_DCON_DSZ_BYTE		(0 << 20)
#define S3C24XX_DCON_DSZ_HALFWORD	(1 << 20)
#define S3C24XX_DCON_DSZ_WORD		(2 << 20)
#define S3C24XX_DCON_DSZ_MASK		(3 << 20)
#define S3C24XX_DCON_DSZ_SHIFT		20
#define S3C24XX_DCON_AUTORELOAD		0
#define S3C24XX_DCON_NORELOAD		BIT(22)
#define S3C24XX_DCON_HWTRIG		BIT(23)
#define S3C24XX_DCON_HWSRC_SHIFT	24
#define S3C24XX_DCON_SERV_SINGLE	0
#define S3C24XX_DCON_SERV_WHOLE		BIT(27)
#define S3C24XX_DCON_TSZ_UNIT		0
#define S3C24XX_DCON_TSZ_BURST4		BIT(28)
#define S3C24XX_DCON_INT		BIT(29)
#define S3C24XX_DCON_SYNC_PCLK		0
#define S3C24XX_DCON_SYNC_HCLK		BIT(30)
#define S3C24XX_DCON_DEMAND		0
#define S3C24XX_DCON_HANDSHAKE		BIT(31)

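/*
 * Illustrative sketch, not part of the original driver: a DCON value for a
 * handshaked, HCLK-synced, single-service peripheral transfer of 256 words
 * would be composed along the lines of
 *
 *	dcon = S3C24XX_DCON_HANDSHAKE | S3C24XX_DCON_SYNC_HCLK |
 *	       S3C24XX_DCON_SERV_SINGLE | S3C24XX_DCON_INT |
 *	       S3C24XX_DCON_DSZ_WORD | (256 & S3C24XX_DCON_TC_MASK);
 *
 * s3c24xx_dma_get_txd() and s3c24xx_dma_start_next_sg() below build the
 * values actually used by the driver.
 */
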
#define S3C24XX_DSTAT			0x14
#define S3C24XX_DSTAT_STAT_BUSY		BIT(20)
#define S3C24XX_DSTAT_CURRTC_MASK	0xfffff

#define S3C24XX_DMASKTRIG		0x20
#define S3C24XX_DMASKTRIG_SWTRIG	BIT(0)
#define S3C24XX_DMASKTRIG_ON		BIT(1)
#define S3C24XX_DMASKTRIG_STOP		BIT(2)

#define S3C24XX_DMAREQSEL		0x24
#define S3C24XX_DMAREQSEL_HW		BIT(0)

/*
 * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
 * for a DMA source. Instead only specific channels are valid.
 * All of these SoCs have 4 physical channels and the number of request
 * source bits is 3. Additionally we also need 1 bit to mark the channel
 * as valid.
 * Therefore we separate the chansel element of the channel data into 4
 * parts of 4 bits each, to hold the information if the channel is valid
 * and the hw request source to use.
 *
 * Example:
 * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
 * For it the chansel field would look like
 *
 * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
 * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
 * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
 */
#define S3C24XX_CHANSEL_WIDTH		4
#define S3C24XX_CHANSEL_VALID		BIT(3)
#define S3C24XX_CHANSEL_REQ_MASK	7

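/*
 * Illustrative sketch, not part of the original driver: decoding the per-phy
 * nibble of a chansel value, mirroring what s3c24xx_dma_phy_valid() and
 * s3c24xx_dma_start_next_sg() do below:
 *
 *	int nibble = (chansel >> (phy_id * S3C24XX_CHANSEL_WIDTH)) & 0xf;
 *	bool valid = nibble & S3C24XX_CHANSEL_VALID;
 *	int hwsrc  = nibble & S3C24XX_CHANSEL_REQ_MASK;
 */
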
/*
 * struct soc_data - vendor-specific config parameters for individual SoCs
 * @stride: spacing between the registers of each channel
 * @has_reqsel: does the controller use the newer request-selection mechanism
 * @has_clocks: are controllable dma-clocks present
 */
struct soc_data {
	int stride;
	bool has_reqsel;
	bool has_clocks;
};

/*
 * enum s3c24xx_dma_chan_state - holds the virtual channel states
 * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
 * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum s3c24xx_dma_chan_state {
	S3C24XX_DMA_CHAN_IDLE,
	S3C24XX_DMA_CHAN_RUNNING,
	S3C24XX_DMA_CHAN_WAITING,
};

/*
 * struct s3c24xx_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct s3c24xx_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/*
 * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @at: sg currently being transferred
 * @width: transfer width
 * @disrcc: value for source control register
 * @didstc: value for destination control register
 * @dcon: base value for dcon register
 * @cyclic: indicate cyclic transfer
 */
struct s3c24xx_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	struct list_head *at;
	u8 width;
	u32 disrcc;
	u32 didstc;
	u32 dcon;
	bool cyclic;
};

struct s3c24xx_dma_chan;

/*
 * struct s3c24xx_dma_phy - holder for the physical channels
 * @id: physical index to this channel
 * @valid: does the channel have all required elements
 * @base: virtual memory base (remapped) for this channel
 * @irq: interrupt for this channel
 * @clk: clock for this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: virtual channel currently being served by this physical channel
 * @host: a pointer to the host (internal use)
 */
struct s3c24xx_dma_phy {
	unsigned int id;
	bool valid;
	void __iomem *base;
	int irq;
	struct clk *clk;
	spinlock_t lock;
	struct s3c24xx_dma_chan *serving;
	struct s3c24xx_dma_engine *host;
};

/*
 * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
 * @id: the id of the channel
 * @name: name of the channel
 * @vc: wrapped virtual channel
 * @phy: the physical channel utilized by this channel, if there is one
 * @cfg: slave configuration for this channel, set via the runtime config
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 */
struct s3c24xx_dma_chan {
	int id;
	const char *name;
	struct virt_dma_chan vc;
	struct s3c24xx_dma_phy *phy;
	struct dma_slave_config cfg;
	struct s3c24xx_txd *at;
	struct s3c24xx_dma_engine *host;
	enum s3c24xx_dma_chan_state state;
	bool slave;
};

/*
 * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
 * @pdev: the corresponding platform device
 * @pdata: platform data passed in from the platform/machine
 * @sdata: SoC-specific config data for the controller variant
 * @base: virtual memory base (remapped)
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @phy_chans: array of data for the physical channels
 */
struct s3c24xx_dma_engine {
	struct platform_device *pdev;
	const struct s3c24xx_dma_platdata *pdata;
	struct soc_data *sdata;
	void __iomem *base;
	struct dma_device slave;
	struct dma_device memcpy;
	struct s3c24xx_dma_phy *phy_chans;
};

/*
 * Physical channel handling
 */

/*
 * Check whether a certain channel is busy or not.
 */
static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
{
	unsigned int val = readl(phy->base + S3C24XX_DSTAT);

	return val & S3C24XX_DSTAT_STAT_BUSY;
}

static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
				  struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	int phyvalid;

	/* every phy is valid for memcpy channels */
	if (!s3cchan->slave)
		return true;

	/* On newer variants all phys can be used for all virtual channels */
	if (s3cdma->sdata->has_reqsel)
		return true;

	phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
	return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy = NULL;
	unsigned long flags;
	int i;
	int ret;

	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
		phy = &s3cdma->phy_chans[i];

		if (!phy->valid)
			continue;

		if (!s3c24xx_dma_phy_valid(s3cchan, phy))
			continue;

		spin_lock_irqsave(&phy->lock, flags);

		if (!phy->serving) {
			phy->serving = s3cchan;
			spin_unlock_irqrestore(&phy->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&phy->lock, flags);
	}

	/* No physical channel available, cope with it */
	if (i == s3cdma->pdata->num_phy_channels) {
		dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
		return NULL;
	}

	/* start the phy clock */
	if (s3cdma->sdata->has_clocks) {
		ret = clk_enable(phy->clk);
		if (ret) {
			dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
				phy->id, ret);
			phy->serving = NULL;
			return NULL;
		}
	}

	return phy;
}

/*
 * Mark the physical channel as free.
 *
 * This drops the link between the physical and virtual channel.
 */
static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = phy->host;

	if (s3cdma->sdata->has_clocks)
		clk_disable(phy->clk);

	phy->serving = NULL;
}

/*
 * Stops the channel by writing the stop bit.
 * This should not be used for an on-going transfer, but as a method of
 * shutting down a channel (e.g., when it's no longer used) or terminating a
 * transfer.
 */
static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
{
	writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Virtual channel handling
 */

static inline
struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
}

static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct s3c24xx_txd *txd = s3cchan->at;
	u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;

	return tc * txd->width;
}

static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
					  struct dma_slave_config *config)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	unsigned long flags;
	int ret = 0;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);

	if (!s3cchan->slave) {
		ret = -EINVAL;
		goto out;
	}

	s3cchan->cfg = *config;

out:
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
	return ret;
}

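/*
 * Illustrative sketch, not part of the original driver: a client peripheral
 * driver would populate a struct dma_slave_config and hand it to the
 * callback above through the generic dmaengine API (the register address
 * and width below are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */
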
/*
 * Transfer handling
 */

static inline
struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct s3c24xx_txd, vd.tx);
}

static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
{
	struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);
		txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
	}

	return txd;
}

static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
{
	struct s3c24xx_sg *dsg, *_dsg;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
				      struct s3c24xx_txd *txd)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
	u32 dcon = txd->dcon;
	u32 val;

	/* transfer-size and -count from len and width */
	switch (txd->width) {
	case 1:
		dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
		break;
	case 2:
		dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
		break;
	case 4:
		dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
		break;
	}

	if (s3cchan->slave) {
		struct s3c24xx_dma_channel *cdata =
			&pdata->channels[s3cchan->id];

		if (s3cdma->sdata->has_reqsel) {
			writel_relaxed((cdata->chansel << 1) |
				       S3C24XX_DMAREQSEL_HW,
				       phy->base + S3C24XX_DMAREQSEL);
		} else {
			int csel = cdata->chansel >> (phy->id *
						      S3C24XX_CHANSEL_WIDTH);

			csel &= S3C24XX_CHANSEL_REQ_MASK;
			dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
			dcon |= S3C24XX_DCON_HWTRIG;
		}
	} else {
		if (s3cdma->sdata->has_reqsel)
			writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
	}

	writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
	writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
	writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
	writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
	writel_relaxed(dcon, phy->base + S3C24XX_DCON);

	val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
	val &= ~S3C24XX_DMASKTRIG_STOP;
	val |= S3C24XX_DMASKTRIG_ON;

	/* trigger the dma operation for memcpy transfers */
	if (!s3cchan->slave)
		val |= S3C24XX_DMASKTRIG_SWTRIG;

	writel(val, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Set the initial DMA register values and start first sg.
 */
static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);

	list_del(&txd->vd.node);

	s3cchan->at = txd;

	/* Wait for channel inactive */
	while (s3c24xx_dma_phy_busy(phy))
		cpu_relax();

	/* point to the first element of the sg list */
	txd->at = txd->dsg_list.next;
	s3c24xx_dma_start_next_sg(s3cchan, txd);
}

static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
				      struct s3c24xx_dma_chan *s3cchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&s3cchan->vc, &head);
	vchan_dma_desc_free_list(&s3cchan->vc, &head);
}

/*
 * Try to allocate a physical channel. When successful, assign it to
 * this virtual channel, and initiate the next descriptor. The
 * virtual channel lock must be held at this point.
 */
static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy;

	phy = s3c24xx_dma_get_phy(s3cchan);
	if (!phy) {
		dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
			s3cchan->name);
		s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
		return;
	}

	dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;

	s3c24xx_dma_start_next_txd(s3cchan);
}

static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
					   struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;

	dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	phy->serving = s3cchan;
	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
	s3c24xx_dma_start_next_txd(s3cchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_chan *p, *next;

retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
		if (p->state == S3C24XX_DMA_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &s3cdma->slave.channels,
				    vc.chan.device_node)
			if (p->state == S3C24XX_DMA_CHAN_WAITING &&
			    s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	s3c24xx_dma_terminate_phy(s3cchan->phy);

	if (next) {
		bool success;

		/*
		 * Eww. We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == S3C24XX_DMA_CHAN_WAITING;
		if (success)
			s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		s3c24xx_dma_put_phy(s3cchan->phy);
	}

	s3cchan->phy = NULL;
	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}

static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);

	if (!s3cchan->slave)
		dma_descriptor_unmap(&vd->tx);

	s3c24xx_dma_free_txd(txd);
}

static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
{
	struct s3c24xx_dma_phy *phy = data;
	struct s3c24xx_dma_chan *s3cchan = phy->serving;
	struct s3c24xx_txd *txd;

	dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);

	/*
	 * Interrupts happen to notify the completion of a transfer and the
	 * channel should have moved into its stop state already on its own.
	 * Therefore interrupts on channels not bound to a virtual channel
	 * should never happen. Nevertheless send a terminate command to the
	 * channel if the unlikely case happens.
	 */
	if (unlikely(!s3cchan)) {
		dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
			phy->id);

		s3c24xx_dma_terminate_phy(phy);

		return IRQ_HANDLED;
	}

	spin_lock(&s3cchan->vc.lock);
	txd = s3cchan->at;
	if (txd) {
		/* when more sg's are in this txd, start the next one */
		if (!list_is_last(txd->at, &txd->dsg_list)) {
			txd->at = txd->at->next;
			if (txd->cyclic)
				vchan_cyclic_callback(&txd->vd);
			s3c24xx_dma_start_next_sg(s3cchan, txd);
		} else if (!txd->cyclic) {
			s3cchan->at = NULL;
			vchan_cookie_complete(&txd->vd);

			/*
			 * And start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&s3cchan->vc))
				s3c24xx_dma_start_next_txd(s3cchan);
			else
				s3c24xx_dma_phy_free(s3cchan);
		} else {
			vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset at beginning */
			txd->at = txd->dsg_list.next;
			s3c24xx_dma_start_next_sg(s3cchan, txd);
		}
	}
	spin_unlock(&s3cchan->vc.lock);

	return IRQ_HANDLED;
}

/*
 * The DMA ENGINE API
 */

static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);

	if (!s3cchan->phy && !s3cchan->at) {
		dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
			s3cchan->id);
		ret = -EINVAL;
		goto unlock;
	}

	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;

	/* Mark physical channel as free */
	if (s3cchan->phy)
		s3c24xx_dma_phy_free(s3cchan);

	/* Dequeue current job */
	if (s3cchan->at) {
		s3c24xx_dma_desc_free(&s3cchan->at->vd);
		s3cchan->at = NULL;
	}

	/* Dequeue jobs not yet fired as well */
	s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
unlock:
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	return ret;
}

static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (ret == DMA_COMPLETE || !txstate) {
		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
		return ret;
	}

	vd = vchan_find_desc(&s3cchan->vc, cookie);
	if (vd) {
		/* On the issued list, so hasn't been processed yet */
		txd = to_s3c24xx_txd(&vd->tx);

		list_for_each_entry(dsg, &txd->dsg_list, node)
			bytes += dsg->len;
	} else {
		/*
		 * Currently running, so sum over the pending sg's and
		 * the currently active one.
		 */
		txd = s3cchan->at;

		dsg = list_entry(txd->at, struct s3c24xx_sg, node);
		list_for_each_entry_from(dsg, &txd->dsg_list, node)
			bytes += dsg->len;

		bytes += s3c24xx_dma_getbytes_chan(s3cchan);
	}
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	/* Whether waiting or running, we're in progress */
	return ret;
}

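/*
 * Illustrative sketch, not part of the original driver: a client would poll
 * transfer progress through the generic dmaengine helpers, which end up in
 * the device_tx_status callback above:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	// state.residue now holds the number of bytes still outstanding
 */
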
/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	int src_mod, dest_mod;

	dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
		len, s3cchan->name);

	if ((len & S3C24XX_DCON_TC_MASK) != len) {
		dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
		return NULL;
	}

	txd = s3c24xx_dma_get_txd();
	if (!txd)
		return NULL;

	dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
	if (!dsg) {
		s3c24xx_dma_free_txd(txd);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/*
	 * Determine a suitable transfer width.
	 * The DMA controller cannot fetch/store information which is not
	 * naturally aligned on the bus, i.e., a 4 byte fetch must start at
	 * an address divisible by 4 - more generally addr % width must be 0.
	 */
	src_mod = src % 4;
	dest_mod = dest % 4;
	switch (len % 4) {
	case 0:
		txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
		break;
	case 2:
		txd->width = ((src_mod == 2 || src_mod == 0) &&
			      (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
		break;
	default:
		txd->width = 1;
		break;
	}

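	/*
	 * Worked example (illustrative, not from the original source):
	 * src = 0x30000002, dest = 0x31000006, len = 10 gives
	 * src_mod = 2, dest_mod = 2 and len % 4 == 2, so the transfer
	 * runs as 5 halfword-wide accesses. Any odd address or length
	 * falls back to byte-wide accesses.
	 */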
	txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
	txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
	txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
		     S3C24XX_DCON_SERV_WHOLE;

	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
		enum dma_transfer_direction direction, unsigned long flags)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	unsigned sg_len;
	dma_addr_t slave_addr;
	u32 hwcfg = 0;
	int i;

	dev_dbg(&s3cdma->pdev->dev,
		"prepare cyclic transaction of %zu bytes with period %zu from %s\n",
		size, period, s3cchan->name);

	if (!is_slave_direction(direction)) {
		dev_err(&s3cdma->pdev->dev,
			"direction %d unsupported\n", direction);
		return NULL;
	}

	txd = s3c24xx_dma_get_txd();
	if (!txd)
		return NULL;

	txd->cyclic = true;

	if (cdata->handshake)
		txd->dcon |= S3C24XX_DCON_HANDSHAKE;

	switch (cdata->bus) {
	case S3C24XX_DMA_APB:
		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
		hwcfg |= S3C24XX_DISRCC_LOC_APB;
		break;
	case S3C24XX_DMA_AHB:
		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
		break;
	}

	/*
	 * Always assume our peripheral destination is a fixed
	 * address in memory.
	 */
	hwcfg |= S3C24XX_DISRCC_INC_FIXED;

	/*
	 * Individual dma operations are requested by the slave,
	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
	 */
	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;

	if (direction == DMA_MEM_TO_DEV) {
		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
			      S3C24XX_DISRCC_INC_INCREMENT;
		txd->didstc = hwcfg;
		slave_addr = s3cchan->cfg.dst_addr;
		txd->width = s3cchan->cfg.dst_addr_width;
	} else {
		txd->disrcc = hwcfg;
		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
			      S3C24XX_DIDSTC_INC_INCREMENT;
		slave_addr = s3cchan->cfg.src_addr;
		txd->width = s3cchan->cfg.src_addr_width;
	}

	sg_len = size / period;

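	/*
	 * Worked example (illustrative, not from the original source):
	 * an audio buffer of size = 64 KiB with period = 16 KiB is split
	 * into sg_len = 4 equally sized sg entries below; when size is
	 * not an exact multiple of period, the last entry absorbs the
	 * remainder (size - period * (sg_len - 1) bytes).
	 */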
	for (i = 0; i < sg_len; i++) {
		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
		if (!dsg) {
			s3c24xx_dma_free_txd(txd);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = period;
		/* Check last period length */
		if (i == sg_len - 1)
			dsg->len = size - period * i;
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = addr + period * i;
			dsg->dst_addr = slave_addr;
		} else { /* DMA_DEV_TO_MEM */
			dsg->src_addr = slave_addr;
			dsg->dst_addr = addr + period * i;
		}
	}

	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	struct scatterlist *sg;
	dma_addr_t slave_addr;
	u32 hwcfg = 0;
	int tmp;

	dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
		sg_dma_len(sgl), s3cchan->name);

	txd = s3c24xx_dma_get_txd();
	if (!txd)
		return NULL;

	if (cdata->handshake)
		txd->dcon |= S3C24XX_DCON_HANDSHAKE;

	switch (cdata->bus) {
	case S3C24XX_DMA_APB:
		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
		hwcfg |= S3C24XX_DISRCC_LOC_APB;
		break;
	case S3C24XX_DMA_AHB:
		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
		break;
	}

	/*
	 * Always assume our peripheral destination is a fixed
	 * address in memory.
	 */
	hwcfg |= S3C24XX_DISRCC_INC_FIXED;

	/*
	 * Individual dma operations are requested by the slave,
	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
	 */
	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;

	if (direction == DMA_MEM_TO_DEV) {
		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
			      S3C24XX_DISRCC_INC_INCREMENT;
		txd->didstc = hwcfg;
		slave_addr = s3cchan->cfg.dst_addr;
		txd->width = s3cchan->cfg.dst_addr_width;
	} else if (direction == DMA_DEV_TO_MEM) {
		txd->disrcc = hwcfg;
		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
			      S3C24XX_DIDSTC_INC_INCREMENT;
		slave_addr = s3cchan->cfg.src_addr;
		txd->width = s3cchan->cfg.src_addr_width;
	} else {
		s3c24xx_dma_free_txd(txd);
		dev_err(&s3cdma->pdev->dev,
			"direction %d unsupported\n", direction);
		return NULL;
	}

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
		if (!dsg) {
			s3c24xx_dma_free_txd(txd);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else { /* DMA_DEV_TO_MEM */
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);
	if (vchan_issue_pending(&s3cchan->vc)) {
		if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
			s3c24xx_dma_phy_alloc_and_start(s3cchan);
	}
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
}

/*
 * Bringup and teardown
 */

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct s3c24xx_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan)
			return -ENOMEM;

		chan->id = i;
		chan->host = s3cdma;
		chan->state = S3C24XX_DMA_CHAN_IDLE;

		if (slave) {
			chan->slave = true;
			chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
			if (!chan->name)
				return -ENOMEM;
		} else {
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name)
				return -ENOMEM;
		}
		dev_dbg(dmadev->dev,
			"initialize virtual channel \"%s\"\n",
			chan->name);

		chan->vc.desc_free = s3c24xx_dma_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
{
	struct s3c24xx_dma_chan *chan = NULL;
	struct s3c24xx_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}
}

/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
static struct soc_data soc_s3c2410 = {
	.stride = 0x40,
	.has_reqsel = false,
	.has_clocks = false,
};

/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2412 = {
	.stride = 0x40,
	.has_reqsel = true,
	.has_clocks = true,
};

/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2443 = {
	.stride = 0x100,
	.has_reqsel = true,
	.has_clocks = true,
};

static const struct platform_device_id s3c24xx_dma_driver_ids[] = {
	{
		.name		= "s3c2410-dma",
		.driver_data	= (kernel_ulong_t)&soc_s3c2410,
	}, {
		.name		= "s3c2412-dma",
		.driver_data	= (kernel_ulong_t)&soc_s3c2412,
	}, {
		.name		= "s3c2443-dma",
		.driver_data	= (kernel_ulong_t)&soc_s3c2443,
	},
	{ },
};

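/*
 * Illustrative sketch, not part of the original driver: a machine file
 * would describe its controller through platform data roughly like this
 * (the channel index DMACH_SDI and the chansel value are hypothetical,
 * board-specific numbers):
 *
 *	static struct s3c24xx_dma_channel board_dma_channels[] = {
 *		[DMACH_SDI] = {
 *			.bus = S3C24XX_DMA_APB,
 *			.handshake = true,
 *			.chansel = ((BIT(3) | 1) << 3 * 4) |
 *				   ((BIT(3) | 2) << 2 * 4) |
 *				   ((BIT(3) | 2) << 0 * 4),
 *		},
 *	};
 *
 *	static struct s3c24xx_dma_platdata board_dma_platdata = {
 *		.num_phy_channels = 4,
 *		.channels = board_dma_channels,
 *		.num_channels = ARRAY_SIZE(board_dma_channels),
 *	};
 */
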
static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
{
	return (struct soc_data *)
		platform_get_device_id(pdev)->driver_data;
}

static int s3c24xx_dma_probe(struct platform_device *pdev)
{
	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct s3c24xx_dma_engine *s3cdma;
	struct soc_data *sdata;
	struct resource *res;
	int ret;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	/* Basic sanity check */
	if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
		dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
			pdata->num_phy_channels, MAX_DMA_CHANNELS);
		return -EINVAL;
	}

	sdata = s3c24xx_dma_get_soc_data(pdev);
	if (!sdata)
		return -EINVAL;

	s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
	if (!s3cdma)
		return -ENOMEM;

	s3cdma->pdev = pdev;
	s3cdma->pdata = pdata;
	s3cdma->sdata = sdata;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(s3cdma->base))
		return PTR_ERR(s3cdma->base);

	s3cdma->phy_chans = devm_kzalloc(&pdev->dev,
					 sizeof(struct s3c24xx_dma_phy) *
					 pdata->num_phy_channels,
					 GFP_KERNEL);
	if (!s3cdma->phy_chans)
		return -ENOMEM;

	/* acquire irqs and clocks for all physical channels */
	for (i = 0; i < pdata->num_phy_channels; i++) {
		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
		char clk_name[6];

		phy->id = i;
		phy->base = s3cdma->base + (i * sdata->stride);
		phy->host = s3cdma;

		phy->irq = platform_get_irq(pdev, i);
		if (phy->irq < 0) {
			dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
				i, phy->irq);
			continue;
		}

		ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
				       0, pdev->name, phy);
		if (ret) {
			dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
				i, ret);
			continue;
		}

		if (sdata->has_clocks) {
			sprintf(clk_name, "dma.%d", i);
			phy->clk = devm_clk_get(&pdev->dev, clk_name);
			if (IS_ERR(phy->clk)) {
				dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %ld\n",
					i, PTR_ERR(phy->clk));
				continue;
			}

			ret = clk_prepare(phy->clk);
			if (ret) {
				dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
					i, ret);
				continue;
			}
		}

		spin_lock_init(&phy->lock);
		phy->valid = true;

		dev_dbg(&pdev->dev, "physical channel %d is %s\n",
			i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
	dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
	s3cdma->memcpy.dev = &pdev->dev;
	s3cdma->memcpy.device_free_chan_resources =
					s3c24xx_dma_free_chan_resources;
	s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
	s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
	s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
	s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
	s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;

	/* Initialize slave engine for SoC internal dedicated peripherals */
	dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
	s3cdma->slave.dev = &pdev->dev;
	s3cdma->slave.device_free_chan_resources =
					s3c24xx_dma_free_chan_resources;
	s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
	s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
	s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
	s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
	s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
	s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;

	/* Register as many memcpy channels as there are physical channels */
	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
						pdata->num_phy_channels, false);
	if (ret <= 0) {
		dev_warn(&pdev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto err_memcpy;
	}

	/* Register slave channels */
	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
						pdata->num_channels, true);
	if (ret <= 0) {
		dev_warn(&pdev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto err_slave;
	}

	ret = dma_async_device_register(&s3cdma->memcpy);
	if (ret) {
		dev_warn(&pdev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto err_memcpy_reg;
	}

	ret = dma_async_device_register(&s3cdma->slave);
	if (ret) {
		dev_warn(&pdev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, s3cdma);
	dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
		 pdata->num_phy_channels);

	return 0;

err_slave_reg:
	dma_async_device_unregister(&s3cdma->memcpy);
err_memcpy_reg:
	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
err_slave:
	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
err_memcpy:
	if (sdata->has_clocks)
		for (i = 0; i < pdata->num_phy_channels; i++) {
			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];

			if (phy->valid)
				clk_unprepare(phy->clk);
		}

	return ret;
}

static void s3c24xx_dma_free_irq(struct platform_device *pdev,
				 struct s3c24xx_dma_engine *s3cdma)
{
	int i;

	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];

		devm_free_irq(&pdev->dev, phy->irq, phy);
	}
}

static int s3c24xx_dma_remove(struct platform_device *pdev)
{
	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
	struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
	int i;

	dma_async_device_unregister(&s3cdma->slave);
	dma_async_device_unregister(&s3cdma->memcpy);

	s3c24xx_dma_free_irq(pdev, s3cdma);

	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);

	if (sdata->has_clocks)
		for (i = 0; i < pdata->num_phy_channels; i++) {
			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];

			if (phy->valid)
				clk_unprepare(phy->clk);
		}

	return 0;
}

static struct platform_driver s3c24xx_dma_driver = {
	.driver		= {
		.name	= "s3c24xx-dma",
	},
	.id_table	= s3c24xx_dma_driver_ids,
	.probe		= s3c24xx_dma_probe,
	.remove		= s3c24xx_dma_remove,
};

module_platform_driver(s3c24xx_dma_driver);

bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
{
	struct s3c24xx_dma_chan *s3cchan;

	if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
		return false;

	s3cchan = to_s3c24xx_dma_chan(chan);

	return s3cchan->id == (int)param;
}
EXPORT_SYMBOL(s3c24xx_dma_filter);

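/*
 * Illustrative sketch, not part of the original driver: a peripheral driver
 * would request one of the virtual slave channels with the filter above
 * (DMACH_SDI is a hypothetical, board-defined channel index):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, s3c24xx_dma_filter,
 *				   (void *)DMACH_SDI);
 */
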
MODULE_DESCRIPTION("S3C24XX DMA Driver");
MODULE_AUTHOR("Heiko Stuebner");
MODULE_LICENSE("GPL v2");