/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS	16

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	unsigned int dmamode;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};

struct imxdma_channel {
	struct imxdma_engine *imxdma;
	unsigned int channel;
	unsigned int imxdma_channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
	struct device *dev;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_device;
	struct imxdma_channel channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

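/*
 * A cyclic transfer stays at the head of ld_active until the channel
 * is terminated, so looking at the first active descriptor is enough
 * to know whether the channel is currently running in cyclic mode.
 */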
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active,
					struct imxdma_desc, node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

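/*
 * Callbacks for the legacy dma-v1 API: the irq and error handlers are
 * registered via imx_dma_setup_handlers() at probe time, the
 * progression handler per cyclic transfer in imxdma_xfer_desc(). All
 * of them simply defer the real work to the channel tasklet.
 */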
static void imxdma_irq_handler(int channel, void *data)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_progression(int channel, void *data,
		struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

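/*
 * Program the dma-v1 channel for one descriptor and start the
 * transfer. Called with the channel lock held, either from
 * issue_pending or from the tasklet when the next queued descriptor
 * is moved to ld_active.
 */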
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
					     d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src,
					   d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	case IMXDMA_DESC_CYCLIC:
		ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
							imxdma_progression);
		if (ret < 0)
			return ret;
		/*
		 * We fall through here since cyclic transfer is the same as
		 * slave_sg adding a progression handler and a specific sg
		 * configuration which is done in 'imxdma_prep_dma_cyclic'.
		 */
	case IMXDMA_DESC_SLAVE_SG:
		if (d->dmamode == DMA_MODE_READ)
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
					       d->sgcount, d->len, d->src, d->dmamode);
		else
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
					       d->sgcount, d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imx_dma_enable(imxdmac->imxdma_channel);
	return 0;
}

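/*
 * Completion tasklet: run the client callback for the first active
 * descriptor, then either keep it on ld_active (cyclic transfers run
 * until terminated) or recycle it to ld_free and start the next
 * descriptor waiting on ld_queue.
 */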
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}

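/*
 * dmaengine device_control hook: DMA_TERMINATE_ALL stops the channel
 * and returns all descriptors to ld_free; DMA_SLAVE_CONFIG translates
 * the generic slave configuration into a dma-v1 channel and burst
 * length setup.
 */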
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imx_dma_disable(imxdmac->imxdma_channel);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
				mode | IMX_DMA_TYPE_FIFO,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				imxdmac->dma_request, 1);

		if (ret)
			return ret;

		imx_dma_config_burstlen(imxdmac->imxdma_channel,
				imxdmac->watermark_level * imxdmac->word_size);

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

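/*
 * Submitting a descriptor only assigns a cookie and moves it from
 * ld_free to ld_queue under the channel lock; nothing is started
 * until the client calls dma_async_issue_pending().
 */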
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	/*
	 * Queue the descriptor that the prep function picked from the
	 * head of ld_free; without this move the descriptor never
	 * reaches ld_queue and issue_pending finds nothing to start.
	 */
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

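/*
 * Preallocate a pool of up to IMXDMA_MAX_CHAN_DESCRIPTORS software
 * descriptors on ld_free; the prep functions hand these out and
 * tx_submit and the tasklet cycle them through ld_queue and ld_active.
 */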
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imx_dma_disable(imxdmac->imxdma_channel);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

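/*
 * Prepare a slave scatter/gather transfer. One descriptor is taken
 * from the head of ld_free per call; the total length is accumulated
 * over the scatterlist and the first entry is checked against the
 * configured bus width alignment.
 */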
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i)
		dma_length += sg->length;

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

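/*
 * Prepare a cyclic transfer (typically used for audio). The buffer is
 * split into buf_len / period_len chunks on a private scatterlist
 * whose extra last entry chains back to the first, so the hardware
 * loops over the buffer until the channel is terminated.
 */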
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/*
	 * Close the loop: chain the extra entry back to the start of the
	 * list. Bit 0 of page_link marks the entry as a chain pointer,
	 * clearing bit 1 removes the end-of-list marker.
	 */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

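/*
 * Prepare a memory-to-memory copy. Both ends are linear 32-bit
 * accesses; the actual channel configuration happens later in
 * imxdma_xfer_desc() using config_port/config_mem.
 */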
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->dmamode = DMA_MODE_WRITE;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

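/*
 * Start the first queued descriptor if the channel is idle. On
 * success the descriptor moves from ld_queue to ld_active; completion
 * and chaining of further descriptors is handled by the tasklet.
 */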
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
				DMA_PRIO_MEDIUM);
		/*
		 * Check the value just returned by imx_dma_request_by_prio();
		 * imxdmac->channel is not assigned until further down.
		 */
		if ((int)imxdmac->imxdma_channel < 0) {
			ret = -ENODEV;
			goto err_init;
		}

		imx_dma_setup_handlers(imxdmac->imxdma_channel,
		       imxdma_irq_handler, imxdma_err_handler, imxdmac);

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	while (--i >= 0) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");