/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

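/*
 * Software descriptor: wraps a dma_async_tx_descriptor and carries the
 * parameters needed to program the hardware channel for one transfer.
 * Unused descriptors sit on ld_free, submitted ones on ld_queue and the
 * currently running one on ld_active (see struct imxdma_channel).
 */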
struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	unsigned int dmamode;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};

struct imxdma_channel {
	struct imxdma_engine *imxdma;
	unsigned int channel;
	unsigned int imxdma_channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
};

#define MAX_DMA_CHANNELS	8

struct imxdma_engine {
	struct device *dev;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_device;
	struct imxdma_channel channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

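/*
 * Interrupt, error and scatter-gather progression callbacks registered
 * with the legacy dma-v1 layer. All of them just defer the descriptor
 * bookkeeping to the channel tasklet.
 */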
static void imxdma_irq_handler(int channel, void *data)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_progression(int channel, void *data,
			       struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

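/*
 * Program the hardware channel for one descriptor (memcpy, slave
 * scatter-gather or cyclic) using the dma-v1 helpers and enable it.
 */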
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
					     d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src,
					   d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	case IMXDMA_DESC_CYCLIC:
		ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
							imxdma_progression);
		if (ret < 0)
			return ret;
		/*
		 * We fall through here since cyclic transfer is the same as
		 * slave_sg adding a progression handler and a specific sg
		 * configuration which is done in 'imxdma_prep_dma_cyclic'.
		 */
	case IMXDMA_DESC_SLAVE_SG:
		if (d->dmamode == DMA_MODE_READ)
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
				       d->sgcount, d->len, d->src, d->dmamode);
		else
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
				       d->sgcount, d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imx_dma_enable(imxdmac->imxdma_channel);
	return 0;
}

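/*
 * Channel tasklet: complete the descriptor at the head of ld_active and run
 * its callback. Unless the descriptor is cyclic, recycle it to ld_free and
 * start the next descriptor waiting on ld_queue.
 */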
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}

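/*
 * device_control hook: DMA_TERMINATE_ALL stops the hardware channel and
 * returns all descriptors to ld_free; DMA_SLAVE_CONFIG programs the
 * peripheral address, burst length and bus width for slave transfers.
 */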
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imx_dma_disable(imxdmac->imxdma_channel);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
				mode | IMX_DMA_TYPE_FIFO,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				imxdmac->dma_request, 1);

		if (ret)
			return ret;

		imx_dma_config_burstlen(imxdmac->imxdma_channel,
				imxdmac->watermark_level * imxdmac->word_size);

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

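/*
 * tx_submit: move the descriptor handed out by a prep function from ld_free
 * onto ld_queue and assign its cookie. The transfer itself is only started
 * later, from issue_pending() or from the tasklet once the channel is idle.
 */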
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	/* Queue the descriptor that the prep function took from ld_free */
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

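/*
 * Preallocate a pool of software descriptors (up to
 * IMXDMA_MAX_CHAN_DESCRIPTORS) on this channel's ld_free list.
 */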
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imx_dma_disable(imxdmac->imxdma_channel);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

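/*
 * Prepare a slave scatter-gather transfer. A free descriptor must be
 * available, the channel must not be running a cyclic transfer, and the
 * buffer must be aligned to the configured bus width.
 */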
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

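/*
 * Prepare a cyclic (circular buffer) transfer. The buffer is split into
 * 'periods' scatterlist entries and an extra trailing entry chains back to
 * the first one, so the hardware keeps looping until the channel is
 * terminated.
 */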
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

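/*
 * Prepare a memory-to-memory copy using linear, 32-bit wide accesses on
 * both the source and the destination side.
 */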
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->dmamode = DMA_MODE_WRITE;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

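/*
 * issue_pending: if the channel is idle, start the first descriptor on
 * ld_queue and move it to ld_active.
 */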
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

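/*
 * Probe: request one legacy dma-v1 channel per dmaengine channel, set up
 * the per-channel tasklet and descriptor lists, and register the engine
 * with the dmaengine core.
 */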
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
				DMA_PRIO_MEDIUM);
		if ((int)imxdmac->imxdma_channel < 0) {
			ret = -ENODEV;
			goto err_init;
		}

		imx_dma_setup_handlers(imxdmac->imxdma_channel,
				imxdma_irq_handler, imxdma_err_handler, imxdmac);

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	while (--i >= 0) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver = {
		.name = "imx-dma",
	},
	.remove = __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX DMA driver");
MODULE_LICENSE("GPL");