/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and similar
 *   parts. Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>
#include "fsldma.h"

static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);
	}

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(fsl_chan))
			break;
		udelay(10);
	}
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link the current queue tail (the previous node's next link
	 * descriptor) to the new descriptor's physical address, and
	 * enable the End-of-segment interrupt for the last link
	 * descriptor.
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When a loop size
 * is set, the DMA cycles through a window of @size bytes while reading
 * from the source address (SA): with a loop size of 4, the DMA reads
 * from SA, SA + 1, SA + 2, SA + 3, then loops back to SA, SA + 1, and
 * so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When a loop
 * size is set, the DMA cycles through a window of @size bytes while
 * writing to the destination address (TA): with a loop size of 4, the
 * DMA writes to TA, TA + 1, TA + 2, TA + 3, then loops back to TA,
 * TA + 1, and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @fsl_chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
{
	BUG_ON(size > 1024);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| ((__ilog2(size) << 24) & 0x0f000000),
		32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately; the DMA channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

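/*
 * Example (an illustrative sketch, not part of this driver): the external
 * start/pause controls above are normally reached through the DMA_SLAVE
 * API. A client fills in the feature fields of struct fsl_dma_slave
 * before preparing a transaction; the fsl_dma_slave_alloc() helper is
 * assumed to come from asm/fsldma.h, and the values are hypothetical:
 *
 *	struct fsl_dma_slave *slave = fsl_dma_slave_alloc(GFP_KERNEL);
 *
 *	slave->external_start = true;	(wait for the DMA start pin)
 *	slave->external_pause = true;	(honor DREQ# pause control)
 *	slave->request_count = 64;	(allow 64 bytes per DREQ# assertion)
 *	chan->private = slave;
 *
 * fsl_dma_prep_slave_sg() below then applies these values through the
 * toggle_ext_start(), toggle_ext_pause() and set_request_count() hooks.
 */
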
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		/* assign a cookie to every descriptor in the chain */
		child->async_tx.cookie = cookie;
	}

	fsl_chan->common.cookie = cookie;
	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		INIT_LIST_HEAD(&desc_sw->tx_list);
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for the DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a DMA pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}

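/*
 * Example (an illustrative sketch, not part of this driver): a memcpy
 * client drives the routine above through the generic dmaengine API.
 * dma_dst, dma_src and len are hypothetical, already DMA-mapped values,
 * and error handling is omitted for brevity:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *	dma_cap_mask_t mask;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dma_dst, dma_src,
 *						   len, DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			== DMA_IN_PROGRESS)
 *		cpu_relax();
 *
 *	dma_release_channel(chan);
 */
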
/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	struct list_head *tx_list;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

	if (!chan)
		return NULL;

	if (!chan->private)
		return NULL;

	fsl_chan = to_fsl_chan(chan);
	slave = chan->private;

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from DMA pool */
			new = fsl_dma_alloc_descriptor(fsl_chan);
			if (!new) {
				dev_err(fsl_chan->dev, "No free memory for "
						       "link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
			dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
					     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
			set_desc_cnt(fsl_chan, &new->hw, copy);
			set_desc_src(fsl_chan, &new->hw, dma_src);
			set_desc_dest(fsl_chan, &new->hw, dma_dst);

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
				set_desc_next(fsl_chan, &prev->hw,
						new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	/* Enable extra controller features */
	if (fsl_chan->set_src_loop_size)
		fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);

	if (fsl_chan->set_dest_loop_size)
		fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);

	if (fsl_chan->toggle_ext_start)
		fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);

	if (fsl_chan->toggle_ext_pause)
		fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);

	if (fsl_chan->set_request_count)
		fsl_chan->set_request_count(fsl_chan, slave->request_count);

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
	tx_list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
		list_del_init(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}

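/*
 * Example (an illustrative sketch, not part of this driver): a DMA_SLAVE
 * client describes the device side with a struct fsl_dma_slave, attaches
 * it to chan->private, and hands the (already mapped) scatterlist to the
 * routine above. fifo_addr, fifo_len, sgl and sg_len are hypothetical,
 * and the fsl_dma_slave_alloc()/fsl_dma_slave_append() helpers are
 * assumed to come from asm/fsldma.h:
 *
 *	struct fsl_dma_slave *slave;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	slave = fsl_dma_slave_alloc(GFP_KERNEL);
 *	fsl_dma_slave_append(slave, fifo_addr, fifo_len);
 *	slave->dst_loop_size = 4;	(hold the device address in a 4-byte loop)
 *	chan->private = slave;
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						DMA_TO_DEVICE,
 *						DMA_PREP_INTERRUPT);
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */
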
static void fsl_dma_device_terminate_all(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *desc, *tmp;
	unsigned long flags;

	if (!chan)
		return;

	fsl_chan = to_fsl_chan(chan);

	/* Halt the DMA engine */
	dma_halt(fsl_chan);

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Remove and free all of the descriptors in the LD queue */
	list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
		list_del(&desc->node);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: it runs the
 * completion callback for each finished link descriptor and returns the
 * descriptor to the channel's DMA pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	if (!dma_is_idle(fsl_chan))
		goto out_unlock;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue which have not been
	 * transferred, we need to start the transfer.
	 */

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(fsl_chan),
			(unsigned long long)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	/* The general status register holds one byte per channel,
	 * with channel 0 in the most significant byte.
	 */
	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}

static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx also gets the 83xx callbacks below */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
		new_fsl_chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
				&fsl_dma_chan_do_interrupt, IRQF_SHARED,
				"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
		compatible,
		new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}

static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}

static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsl_dma_device *fdev;
	struct device_node *child;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at 0x%llx...\n",
			match->compatible, (unsigned long long)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}

static int of_fsl_dma_remove(struct of_device *of_dev)
{
	struct fsl_dma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "fsl-elo-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
	.remove = of_fsl_dma_remove,
};

static __init int of_fsl_dma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&of_fsl_dma_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit of_fsl_dma_exit(void)
{
	of_unregister_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_init);
module_exit(of_fsl_dma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");