/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

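/*
 * A minimal usage sketch from a client driver's perspective, via the
 * generic dmaengine API (the filter function, FIFO address and burst
 * values below are hypothetical, not part of this driver):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 16,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter_fn, &imx_dma_data);
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						  DMA_DEV_TO_MEM, 0);
 *	desc->callback = transfer_done_fn;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
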
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	unsigned int dmamode;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};

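/*
 * Descriptor lifecycle: descriptors are preallocated on ld_free, move to
 * ld_queue on tx_submit(), to ld_active once the transfer is actually
 * programmed on the hardware, and back to ld_free when the tasklet
 * completes them. Cyclic descriptors stay on ld_active until the channel
 * is terminated.
 */
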
struct imxdma_channel {
	struct imxdma_engine		*imxdma;
	unsigned int			channel;
	unsigned int			imxdma_channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	dma_cookie_t			last_completed;
	int				dma_request;
	struct scatterlist		*sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imxdma_irq_handler(int channel, void *data)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_progression(int channel, void *data,
		struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
					     d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src,
					   d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	case IMXDMA_DESC_CYCLIC:
		ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
							imxdma_progression);
		if (ret < 0)
			return ret;
		/*
		 * We fall through here since cyclic transfer is the same as
		 * slave_sg adding a progression handler and a specific sg
		 * configuration which is done in 'imxdma_prep_dma_cyclic'.
		 */
	case IMXDMA_DESC_SLAVE_SG:
		if (d->dmamode == DMA_MODE_READ)
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
					       d->sgcount, d->len, d->src, d->dmamode);
		else
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
					       d->sgcount, d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imx_dma_enable(imxdmac->imxdma_channel);
	return 0;
}

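/*
 * Completion path: the dma-v1 irq/error/progression handlers above only
 * schedule this tasklet. It runs the client callback for the head of
 * ld_active, records the completed cookie, recycles the descriptor to
 * ld_free (unless it is cyclic) and starts the next descriptor waiting
 * on ld_queue.
 */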
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	imxdmac->last_completed = desc->desc.cookie;

	/* If we are dealing with a cyclic descriptor, keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}

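/*
 * device_control entry point: DMA_TERMINATE_ALL stops the hardware channel
 * and returns every queued/active descriptor to ld_free; DMA_SLAVE_CONFIG
 * caches the peripheral address, bus width and watermark, then programs the
 * dma-v1 channel (the burst length in bytes is watermark_level * word_size).
 */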
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imx_dma_disable(imxdmac->imxdma_channel);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
					     mode | IMX_DMA_TYPE_FIFO,
					     IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
					     imxdmac->dma_request, 1);

		if (ret)
			return ret;

		imx_dma_config_burstlen(imxdmac->imxdma_channel,
					imxdmac->watermark_level * imxdmac->word_size);

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	dma_cookie_t last_used;
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
	dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return ret;
}

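/*
 * dmaengine cookies are positive, monotonically increasing transfer ids;
 * on signed overflow the counter wraps back to 1 rather than to a negative
 * (error) value, e.g. with chan.cookie == INT_MAX the next assigned cookie
 * is 1.
 */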
static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
{
	dma_cookie_t cookie = imxdma->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	imxdma->chan.cookie = cookie;

	return cookie;
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = imxdma_assign_cookie(imxdmac);
	tx->cookie = cookie;

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imx_dma_disable(imxdmac->imxdma_channel);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;
	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

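	/*
	 * An extra terminating entry chains back to the first element:
	 * setting bit 0 of page_link marks an sg chain link (as sg_chain()
	 * does) and clearing bit 1 makes sure it is not taken for an end
	 * marker, so the dma-v1 layer keeps cycling over the same buffer
	 * until the channel is stopped.
	 */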
	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->dmamode = DMA_MODE_WRITE;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

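/*
 * Per the dmaengine contract, submitted descriptors sit on ld_queue until
 * the client calls issue_pending; only then is the first transfer actually
 * programmed and enabled on the hardware (subsequent ones are chained from
 * the completion tasklet).
 */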
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
				DMA_PRIO_MEDIUM);
		if ((int)imxdmac->imxdma_channel < 0) {
			ret = -ENODEV;
			goto err_init;
		}

		imx_dma_setup_handlers(imxdmac->imxdma_channel,
			imxdma_irq_handler, imxdma_err_handler, imxdmac);

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	while (--i >= 0) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");