blob: 82b8e9f9c7bf124ee676c34219b1f669984d4bc9 [file] [log] [blame]
Zhang Wei173acc72008-03-01 07:42:48 -07001/*
2 * Freescale MPC85xx, MPC83xx DMA Engine support
3 *
Li Yange2c8e4252010-11-11 20:16:29 +08004 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
Zhang Wei173acc72008-03-01 07:42:48 -07005 *
6 * Author:
7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
8 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
9 *
10 * Description:
11 * DMA engine driver for Freescale MPC8540 DMA controller, which is
12 * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
Stefan Weilc2e07b32010-08-03 19:44:52 +020013 * The support for MPC8349 DMA controller is also added.
Zhang Wei173acc72008-03-01 07:42:48 -070014 *
Ira W. Snydera7aea372009-04-23 16:17:54 -070015 * This driver instructs the DMA controller to issue the PCI Read Multiple
16 * command for PCI read operations, instead of using the default PCI Read Line
17 * command. Please be aware that this setting may result in read pre-fetching
18 * on some platforms.
19 *
Zhang Wei173acc72008-03-01 07:42:48 -070020 * This is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 */
26
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/pci.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090030#include <linux/slab.h>
Zhang Wei173acc72008-03-01 07:42:48 -070031#include <linux/interrupt.h>
32#include <linux/dmaengine.h>
33#include <linux/delay.h>
34#include <linux/dma-mapping.h>
35#include <linux/dmapool.h>
36#include <linux/of_platform.h>
37
38#include "fsldma.h"
39
/*
 * Per-channel logging helpers: prefix every message with the channel
 * name so output from multiple DMA channels can be told apart.
 */
#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

/* common message used by all link-descriptor allocation failure paths */
static const char msg_ld_oom[] = "No free memory for link descriptor";
Ira Snyderc14330412010-09-30 11:46:45 +000046
Ira Snydere8bd84d2011-03-03 07:54:54 +000047/*
48 * Register Helpers
49 */
Zhang Wei173acc72008-03-01 07:42:48 -070050
/* Write the channel's 32-bit status register (SR). */
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}
55
/* Read the channel's 32-bit status register (SR). */
static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}
60
/*
 * Program the current descriptor address register (CDAR).
 * The snoop-enable bit (FSL_DMA_SNEN) is always set in the written value.
 */
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}
65
/*
 * Read the current descriptor address register (CDAR), with the
 * snoop-enable bit masked off so a plain address is returned.
 */
static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
70
/* Read the next descriptor address register (NDAR). */
static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}
75
/* Read the byte count register (BCR). */
static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}
80
81/*
82 * Descriptor Helpers
83 */
84
/* Set the transfer byte count in a hardware link descriptor. */
static void set_desc_cnt(struct fsldma_chan *chan,
			struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}
90
Ira Snydera1c03312010-01-06 13:34:05 +000091static void set_desc_src(struct fsldma_chan *chan,
Zhang Wei173acc72008-03-01 07:42:48 -070092 struct fsl_dma_ld_hw *hw, dma_addr_t src)
93{
94 u64 snoop_bits;
95
Ira Snydera1c03312010-01-06 13:34:05 +000096 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
Zhang Wei173acc72008-03-01 07:42:48 -070097 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
Ira Snydera1c03312010-01-06 13:34:05 +000098 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
Zhang Wei173acc72008-03-01 07:42:48 -070099}
100
Ira Snydera1c03312010-01-06 13:34:05 +0000101static void set_desc_dst(struct fsldma_chan *chan,
Ira Snyder738f5f72010-01-06 13:34:02 +0000102 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
Zhang Wei173acc72008-03-01 07:42:48 -0700103{
104 u64 snoop_bits;
105
Ira Snydera1c03312010-01-06 13:34:05 +0000106 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
Zhang Wei173acc72008-03-01 07:42:48 -0700107 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
Ira Snydera1c03312010-01-06 13:34:05 +0000108 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
Zhang Wei173acc72008-03-01 07:42:48 -0700109}
110
Ira Snydera1c03312010-01-06 13:34:05 +0000111static void set_desc_next(struct fsldma_chan *chan,
Zhang Wei173acc72008-03-01 07:42:48 -0700112 struct fsl_dma_ld_hw *hw, dma_addr_t next)
113{
114 u64 snoop_bits;
115
Ira Snydera1c03312010-01-06 13:34:05 +0000116 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
Zhang Wei173acc72008-03-01 07:42:48 -0700117 ? FSL_DMA_SNEN : 0;
Ira Snydera1c03312010-01-06 13:34:05 +0000118 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
Zhang Wei173acc72008-03-01 07:42:48 -0700119}
120
/*
 * Mark a software descriptor as the end of a transaction chain by
 * setting the EOL bit in its hardware next-link address field.
 * The field is read back, modified, and rewritten so the existing
 * next-link address is preserved.
 */
static void set_ld_eol(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	/* 83xx parts also need the snoop-enable bit set here */
	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}
133
Ira Snydere8bd84d2011-03-03 07:54:54 +0000134/*
135 * DMA Engine Hardware Control Helpers
136 */
Zhang Wei173acc72008-03-01 07:42:48 -0700137
/*
 * Reset the channel and program the mode register with the interrupt
 * enables appropriate for the controller generation (85xx vs 83xx).
 */
static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
				| FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}
165
/*
 * Return nonzero when the channel is idle: either the busy bit (CB)
 * is clear, or the channel has halted (CH).
 */
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
171
/*
 * Kick off the channel. Depending on the feature flags this either
 * starts the transfer immediately (MR[CS]) or arms the channel to be
 * started/paused by external pins (MR[EMS_EN]/MR[EMP_EN]).
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			/* external pause: clear BCR, enable external master pause */
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;	/* wait for external start pin */
	else
		mode |= FSL_DMA_MR_CS;		/* software channel start */

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
194
/*
 * Stop the channel: pulse the abort bit (MR[CA]), clear the start
 * bits, then poll up to ~1ms for the channel to go idle.
 *
 * NOTE(review): MR[CA] looks 85xx-specific; whether this abort
 * sequence is valid on 83xx parts cannot be confirmed from this
 * file alone — verify against the reference manual.
 */
static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* set the channel-abort bit */
	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* clear start/abort bits so the channel stays stopped */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait up to 100 * 10us for the hardware to go idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}
217
Zhang Wei173acc72008-03-01 07:42:48 -0700218/**
219 * fsl_chan_set_src_loop_size - Set source address hold transfer size
Ira Snydera1c03312010-01-06 13:34:05 +0000220 * @chan : Freescale DMA channel
Zhang Wei173acc72008-03-01 07:42:48 -0700221 * @size : Address loop size, 0 for disable loop
222 *
223 * The set source address hold transfer size. The source
224 * address hold or loop transfer size is when the DMA transfer
225 * data from source address (SA), if the loop size is 4, the DMA will
226 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
227 * SA + 1 ... and so on.
228 */
Ira Snydera1c03312010-01-06 13:34:05 +0000229static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
Zhang Wei173acc72008-03-01 07:42:48 -0700230{
Ira Snyder272ca652010-01-06 13:33:59 +0000231 u32 mode;
232
Ira Snydera1c03312010-01-06 13:34:05 +0000233 mode = DMA_IN(chan, &chan->regs->mr, 32);
Ira Snyder272ca652010-01-06 13:33:59 +0000234
Zhang Wei173acc72008-03-01 07:42:48 -0700235 switch (size) {
236 case 0:
Ira Snyder272ca652010-01-06 13:33:59 +0000237 mode &= ~FSL_DMA_MR_SAHE;
Zhang Wei173acc72008-03-01 07:42:48 -0700238 break;
239 case 1:
240 case 2:
241 case 4:
242 case 8:
Ira Snyder272ca652010-01-06 13:33:59 +0000243 mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
Zhang Wei173acc72008-03-01 07:42:48 -0700244 break;
245 }
Ira Snyder272ca652010-01-06 13:33:59 +0000246
Ira Snydera1c03312010-01-06 13:34:05 +0000247 DMA_OUT(chan, &chan->regs->mr, mode, 32);
Zhang Wei173acc72008-03-01 07:42:48 -0700248}
249
250/**
Ira Snyder738f5f72010-01-06 13:34:02 +0000251 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
Ira Snydera1c03312010-01-06 13:34:05 +0000252 * @chan : Freescale DMA channel
Zhang Wei173acc72008-03-01 07:42:48 -0700253 * @size : Address loop size, 0 for disable loop
254 *
255 * The set destination address hold transfer size. The destination
256 * address hold or loop transfer size is when the DMA transfer
257 * data to destination address (TA), if the loop size is 4, the DMA will
258 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
259 * TA + 1 ... and so on.
260 */
Ira Snydera1c03312010-01-06 13:34:05 +0000261static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
Zhang Wei173acc72008-03-01 07:42:48 -0700262{
Ira Snyder272ca652010-01-06 13:33:59 +0000263 u32 mode;
264
Ira Snydera1c03312010-01-06 13:34:05 +0000265 mode = DMA_IN(chan, &chan->regs->mr, 32);
Ira Snyder272ca652010-01-06 13:33:59 +0000266
Zhang Wei173acc72008-03-01 07:42:48 -0700267 switch (size) {
268 case 0:
Ira Snyder272ca652010-01-06 13:33:59 +0000269 mode &= ~FSL_DMA_MR_DAHE;
Zhang Wei173acc72008-03-01 07:42:48 -0700270 break;
271 case 1:
272 case 2:
273 case 4:
274 case 8:
Ira Snyder272ca652010-01-06 13:33:59 +0000275 mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
Zhang Wei173acc72008-03-01 07:42:48 -0700276 break;
277 }
Ira Snyder272ca652010-01-06 13:33:59 +0000278
Ira Snydera1c03312010-01-06 13:34:05 +0000279 DMA_OUT(chan, &chan->regs->mr, mode, 32);
Zhang Wei173acc72008-03-01 07:42:48 -0700280}
281
282/**
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700283 * fsl_chan_set_request_count - Set DMA Request Count for external control
Ira Snydera1c03312010-01-06 13:34:05 +0000284 * @chan : Freescale DMA channel
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700285 * @size : Number of bytes to transfer in a single request
286 *
287 * The Freescale DMA channel can be controlled by the external signal DREQ#.
288 * The DMA request count is how many bytes are allowed to transfer before
289 * pausing the channel, after which a new assertion of DREQ# resumes channel
290 * operation.
291 *
292 * A size of 0 disables external pause control. The maximum size is 1024.
293 */
Ira Snydera1c03312010-01-06 13:34:05 +0000294static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700295{
Ira Snyder272ca652010-01-06 13:33:59 +0000296 u32 mode;
297
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700298 BUG_ON(size > 1024);
Ira Snyder272ca652010-01-06 13:33:59 +0000299
Ira Snydera1c03312010-01-06 13:34:05 +0000300 mode = DMA_IN(chan, &chan->regs->mr, 32);
Ira Snyder272ca652010-01-06 13:33:59 +0000301 mode |= (__ilog2(size) << 24) & 0x0f000000;
302
Ira Snydera1c03312010-01-06 13:34:05 +0000303 DMA_OUT(chan, &chan->regs->mr, mode, 32);
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700304}
305
306/**
Zhang Wei173acc72008-03-01 07:42:48 -0700307 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
Ira Snydera1c03312010-01-06 13:34:05 +0000308 * @chan : Freescale DMA channel
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700309 * @enable : 0 is disabled, 1 is enabled.
Zhang Wei173acc72008-03-01 07:42:48 -0700310 *
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700311 * The Freescale DMA channel can be controlled by the external signal DREQ#.
312 * The DMA Request Count feature should be used in addition to this feature
313 * to set the number of bytes to transfer before pausing the channel.
Zhang Wei173acc72008-03-01 07:42:48 -0700314 */
Ira Snydera1c03312010-01-06 13:34:05 +0000315static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
Zhang Wei173acc72008-03-01 07:42:48 -0700316{
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700317 if (enable)
Ira Snydera1c03312010-01-06 13:34:05 +0000318 chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
Ira Snydere6c7ecb2009-09-08 17:53:04 -0700319 else
Ira Snydera1c03312010-01-06 13:34:05 +0000320 chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
Zhang Wei173acc72008-03-01 07:42:48 -0700321}
322
323/**
324 * fsl_chan_toggle_ext_start - Toggle channel external start status
Ira Snydera1c03312010-01-06 13:34:05 +0000325 * @chan : Freescale DMA channel
Zhang Wei173acc72008-03-01 07:42:48 -0700326 * @enable : 0 is disabled, 1 is enabled.
327 *
328 * If enable the external start, the channel can be started by an
329 * external DMA start pin. So the dma_start() does not start the
330 * transfer immediately. The DMA channel will wait for the
331 * control pin asserted.
332 */
Ira Snydera1c03312010-01-06 13:34:05 +0000333static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
Zhang Wei173acc72008-03-01 07:42:48 -0700334{
335 if (enable)
Ira Snydera1c03312010-01-06 13:34:05 +0000336 chan->feature |= FSL_DMA_CHAN_START_EXT;
Zhang Wei173acc72008-03-01 07:42:48 -0700337 else
Ira Snydera1c03312010-01-06 13:34:05 +0000338 chan->feature &= ~FSL_DMA_CHAN_START_EXT;
Zhang Wei173acc72008-03-01 07:42:48 -0700339}
340
/*
 * Append a transaction's descriptors to the channel's pending queue.
 *
 * NOTE(review): callers appear to hold chan->desc_lock (see
 * fsl_dma_tx_submit) — confirm before calling from elsewhere.
 */
static void append_ld_queue(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	/* nothing queued yet: no hardware chain to patch */
	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
365
/*
 * dmaengine tx_submit hook: assign cookies to every descriptor in the
 * transaction and move the whole chain onto the pending queue.
 * Returns the cookie of the last descriptor in the transaction.
 */
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		/* cookies are positive; wrap back to 1 on overflow */
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
398
/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Allocates one zeroed software descriptor (with its hardware link
 * descriptor) from the channel's DMA pool and initializes its async_tx
 * fields. Uses GFP_ATOMIC, so it is safe in non-sleeping contexts.
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}
429
430
/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 * It is idempotent: if the pool already exists the call is a no-op.
 *
 * Return - The number of descriptors allocated (1), or -ENOMEM on
 * pool-creation failure.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32bytes
	 * for meeting FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}
462
463/**
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000464 * fsldma_free_desc_list - Free all descriptors in a queue
465 * @chan: Freescae DMA channel
466 * @list: the list to free
467 *
468 * LOCKING: must hold chan->desc_lock
469 */
470static void fsldma_free_desc_list(struct fsldma_chan *chan,
471 struct list_head *list)
472{
473 struct fsl_desc_sw *desc, *_desc;
474
475 list_for_each_entry_safe(desc, _desc, list, node) {
476 list_del(&desc->node);
Ira Snyder0ab09c32011-03-03 07:54:56 +0000477#ifdef FSL_DMA_LD_DEBUG
478 chan_dbg(chan, "LD %p free\n", desc);
479#endif
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000480 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
481 }
482}
483
484static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
485 struct list_head *list)
486{
487 struct fsl_desc_sw *desc, *_desc;
488
489 list_for_each_entry_safe_reverse(desc, _desc, list, node) {
490 list_del(&desc->node);
Ira Snyder0ab09c32011-03-03 07:54:56 +0000491#ifdef FSL_DMA_LD_DEBUG
492 chan_dbg(chan, "LD %p free\n", desc);
493#endif
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000494 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
495 }
496}
497
/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 *
 * Releases every pending and running descriptor back to the pool,
 * then destroys the pool itself. After this call the channel must go
 * through fsl_dma_alloc_chan_resources() again before use.
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
516
/*
 * dmaengine prep hook for a pure-interrupt operation: builds a single
 * descriptor that transfers no data, used only to raise an interrupt
 * when the preceding operations complete.
 */
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	/* cookie stays -EBUSY until tx_submit assigns a real one */
	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list*/
	set_ld_eol(chan, new);

	return &new->async_tx;
}
545
/*
 * dmaengine prep hook for memcpy: split a copy of @len bytes into a
 * chain of link descriptors, each transferring at most
 * FSL_DMA_BCR_MAX_CNT bytes. On allocation failure, any descriptors
 * already built are freed and NULL is returned.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		/* cap each segment at the controller's max byte count */
		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list*/
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
609
/*
 * dmaengine prep hook for scatterlist-to-scatterlist copy: walk both
 * lists in lockstep, emitting one link descriptor per overlapping
 * region (capped at FSL_DMA_BCR_MAX_CNT bytes). Stops when either
 * list is exhausted; on allocation failure all descriptors built so
 * far are freed.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		/* current position = entry start + (consumed so far) */
		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
725
Zhang Wei173acc72008-03-01 07:42:48 -0700726/**
Ira Snyderbbea0b62009-09-08 17:53:04 -0700727 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
728 * @chan: DMA channel
729 * @sgl: scatterlist to transfer to/from
730 * @sg_len: number of entries in @scatterlist
731 * @direction: DMA direction
732 * @flags: DMAEngine flags
733 *
734 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
735 * DMA_SLAVE API, this gets the device-specific information from the
736 * chan->private variable.
737 */
738static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
Ira Snydera1c03312010-01-06 13:34:05 +0000739 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
Ira Snyderbbea0b62009-09-08 17:53:04 -0700740 enum dma_data_direction direction, unsigned long flags)
741{
Ira Snyderbbea0b62009-09-08 17:53:04 -0700742 /*
Ira Snyder968f19a2010-09-30 11:46:46 +0000743 * This operation is not supported on the Freescale DMA controller
Ira Snyderbbea0b62009-09-08 17:53:04 -0700744 *
Ira Snyder968f19a2010-09-30 11:46:46 +0000745 * However, we need to provide the function pointer to allow the
746 * device_control() method to work.
Ira Snyderbbea0b62009-09-08 17:53:04 -0700747 */
Ira Snyderbbea0b62009-09-08 17:53:04 -0700748 return NULL;
749}
750
Linus Walleijc3635c72010-03-26 16:44:01 -0700751static int fsl_dma_device_control(struct dma_chan *dchan,
Linus Walleij05827632010-05-17 16:30:42 -0700752 enum dma_ctrl_cmd cmd, unsigned long arg)
Ira Snyderbbea0b62009-09-08 17:53:04 -0700753{
Ira Snyder968f19a2010-09-30 11:46:46 +0000754 struct dma_slave_config *config;
Ira Snydera1c03312010-01-06 13:34:05 +0000755 struct fsldma_chan *chan;
Ira Snyderbbea0b62009-09-08 17:53:04 -0700756 unsigned long flags;
Ira Snyder968f19a2010-09-30 11:46:46 +0000757 int size;
Linus Walleijc3635c72010-03-26 16:44:01 -0700758
Ira Snydera1c03312010-01-06 13:34:05 +0000759 if (!dchan)
Linus Walleijc3635c72010-03-26 16:44:01 -0700760 return -EINVAL;
Ira Snyderbbea0b62009-09-08 17:53:04 -0700761
Ira Snydera1c03312010-01-06 13:34:05 +0000762 chan = to_fsl_chan(dchan);
Ira Snyderbbea0b62009-09-08 17:53:04 -0700763
Ira Snyder968f19a2010-09-30 11:46:46 +0000764 switch (cmd) {
765 case DMA_TERMINATE_ALL:
766 /* Halt the DMA engine */
767 dma_halt(chan);
Ira Snyderbbea0b62009-09-08 17:53:04 -0700768
Ira Snyder968f19a2010-09-30 11:46:46 +0000769 spin_lock_irqsave(&chan->desc_lock, flags);
Ira Snyderbbea0b62009-09-08 17:53:04 -0700770
Ira Snyder968f19a2010-09-30 11:46:46 +0000771 /* Remove and free all of the descriptors in the LD queue */
772 fsldma_free_desc_list(chan, &chan->ld_pending);
773 fsldma_free_desc_list(chan, &chan->ld_running);
Ira Snyderbbea0b62009-09-08 17:53:04 -0700774
Ira Snyder968f19a2010-09-30 11:46:46 +0000775 spin_unlock_irqrestore(&chan->desc_lock, flags);
776 return 0;
777
778 case DMA_SLAVE_CONFIG:
779 config = (struct dma_slave_config *)arg;
780
781 /* make sure the channel supports setting burst size */
782 if (!chan->set_request_count)
783 return -ENXIO;
784
785 /* we set the controller burst size depending on direction */
786 if (config->direction == DMA_TO_DEVICE)
787 size = config->dst_addr_width * config->dst_maxburst;
788 else
789 size = config->src_addr_width * config->src_maxburst;
790
791 chan->set_request_count(chan, size);
792 return 0;
793
794 case FSLDMA_EXTERNAL_START:
795
796 /* make sure the channel supports external start */
797 if (!chan->toggle_ext_start)
798 return -ENXIO;
799
800 chan->toggle_ext_start(chan, arg);
801 return 0;
802
803 default:
804 return -ENXIO;
805 }
Linus Walleijc3635c72010-03-26 16:44:01 -0700806
807 return 0;
Ira Snyderbbea0b62009-09-08 17:53:04 -0700808}
809
810/**
Zhang Wei173acc72008-03-01 07:42:48 -0700811 * fsl_dma_update_completed_cookie - Update the completed cookie.
Ira Snydera1c03312010-01-06 13:34:05 +0000812 * @chan : Freescale DMA channel
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000813 *
814 * CONTEXT: hardirq
Zhang Wei173acc72008-03-01 07:42:48 -0700815 */
Ira Snydera1c03312010-01-06 13:34:05 +0000816static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
Zhang Wei173acc72008-03-01 07:42:48 -0700817{
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000818 struct fsl_desc_sw *desc;
819 unsigned long flags;
820 dma_cookie_t cookie;
Zhang Wei173acc72008-03-01 07:42:48 -0700821
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000822 spin_lock_irqsave(&chan->desc_lock, flags);
Zhang Wei173acc72008-03-01 07:42:48 -0700823
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000824 if (list_empty(&chan->ld_running)) {
Ira Snyderb1584712011-03-03 07:54:55 +0000825 chan_dbg(chan, "no running descriptors\n");
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000826 goto out_unlock;
Zhang Wei173acc72008-03-01 07:42:48 -0700827 }
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000828
829 /* Get the last descriptor, update the cookie to that */
830 desc = to_fsl_desc(chan->ld_running.prev);
831 if (dma_is_idle(chan))
832 cookie = desc->async_tx.cookie;
Steven J. Magnani76bd0612010-02-28 22:18:16 -0700833 else {
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000834 cookie = desc->async_tx.cookie - 1;
Steven J. Magnani76bd0612010-02-28 22:18:16 -0700835 if (unlikely(cookie < DMA_MIN_COOKIE))
836 cookie = DMA_MAX_COOKIE;
837 }
Ira Snyder9c3a50b2010-01-06 13:34:06 +0000838
839 chan->completed_cookie = cookie;
840
841out_unlock:
842 spin_unlock_irqrestore(&chan->desc_lock, flags);
843}
844
845/**
846 * fsldma_desc_status - Check the status of a descriptor
847 * @chan: Freescale DMA channel
848 * @desc: DMA SW descriptor
849 *
850 * This function will return the status of the given descriptor
851 */
852static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
853 struct fsl_desc_sw *desc)
854{
855 return dma_async_is_complete(desc->async_tx.cookie,
856 chan->completed_cookie,
857 chan->common.cookie);
Zhang Wei173acc72008-03-01 07:42:48 -0700858}
859
/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function clean up the ld_queue of DMA channel.
 *
 * Walks the ld_running list in order, and for each descriptor that the
 * hardware has completed: removes it from the list, invokes its
 * client callback, runs its async_tx dependencies, and returns it to
 * the DMA pool. Stops at the first descriptor still in progress.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	chan_dbg(chan, "chan completed_cookie = %d\n", chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		/* list is in submission order: the first in-progress
		 * descriptor means everything after it is unfinished too */
		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
			break;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			/* desc_lock is dropped across the callback —
			 * presumably so the client may re-enter the driver
			 * (e.g. submit new work) without deadlocking */
			spin_unlock_irqrestore(&chan->desc_lock, flags);
#ifdef FSL_DMA_LD_DEBUG
			chan_dbg(chan, "LD %p callback\n", desc);
#endif
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}
906
/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		goto out_unlock;
	}

	/*
	 * The DMA controller is not idle, which means the interrupt
	 * handler will start any queued transactions when it runs
	 * at the end of the current transaction
	 */
	if (!dma_is_idle(chan)) {
		chan_dbg(chan, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * TODO:
	 * make sure the dma_halt() function really un-wedges the
	 * controller as much as possible
	 */
	dma_halt(chan);

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	/* remember the head before the splice empties ld_pending */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}
971
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @dchan : Freescale DMA channel
 *
 * Kick the channel so any pending descriptors get submitted to the
 * hardware.
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	fsl_chan_xfer_ld_queue(to_fsl_chan(dchan));
}
981
Zhang Wei173acc72008-03-01 07:42:48 -0700982/**
Linus Walleij07934482010-03-26 16:50:49 -0700983 * fsl_tx_status - Determine the DMA status
Ira Snydera1c03312010-01-06 13:34:05 +0000984 * @chan : Freescale DMA channel
Zhang Wei173acc72008-03-01 07:42:48 -0700985 */
Linus Walleij07934482010-03-26 16:50:49 -0700986static enum dma_status fsl_tx_status(struct dma_chan *dchan,
Zhang Wei173acc72008-03-01 07:42:48 -0700987 dma_cookie_t cookie,
Linus Walleij07934482010-03-26 16:50:49 -0700988 struct dma_tx_state *txstate)
Zhang Wei173acc72008-03-01 07:42:48 -0700989{
Ira Snydera1c03312010-01-06 13:34:05 +0000990 struct fsldma_chan *chan = to_fsl_chan(dchan);
Zhang Wei173acc72008-03-01 07:42:48 -0700991 dma_cookie_t last_used;
992 dma_cookie_t last_complete;
993
Ira Snydera1c03312010-01-06 13:34:05 +0000994 fsl_chan_ld_cleanup(chan);
Zhang Wei173acc72008-03-01 07:42:48 -0700995
Ira Snydera1c03312010-01-06 13:34:05 +0000996 last_used = dchan->cookie;
997 last_complete = chan->completed_cookie;
Zhang Wei173acc72008-03-01 07:42:48 -0700998
Dan Williamsbca34692010-03-26 16:52:10 -0700999 dma_set_tx_state(txstate, last_complete, last_used, 0);
Zhang Wei173acc72008-03-01 07:42:48 -07001000
1001 return dma_async_is_complete(cookie, last_complete, last_used);
1002}
1003
Ira Snyderd3f620b2010-01-06 13:34:04 +00001004/*----------------------------------------------------------------------------*/
1005/* Interrupt Handling */
1006/*----------------------------------------------------------------------------*/
1007
Ira Snydere7a29152010-01-06 13:34:03 +00001008static irqreturn_t fsldma_chan_irq(int irq, void *data)
Zhang Wei173acc72008-03-01 07:42:48 -07001009{
Ira Snydera1c03312010-01-06 13:34:05 +00001010 struct fsldma_chan *chan = data;
Zhang Wei1c629792008-04-17 20:17:25 -07001011 int update_cookie = 0;
1012 int xfer_ld_q = 0;
Ira Snydera1c03312010-01-06 13:34:05 +00001013 u32 stat;
Zhang Wei173acc72008-03-01 07:42:48 -07001014
Ira Snyder9c3a50b2010-01-06 13:34:06 +00001015 /* save and clear the status register */
Ira Snydera1c03312010-01-06 13:34:05 +00001016 stat = get_sr(chan);
Ira Snyder9c3a50b2010-01-06 13:34:06 +00001017 set_sr(chan, stat);
Ira Snyderb1584712011-03-03 07:54:55 +00001018 chan_dbg(chan, "irq: stat = 0x%x\n", stat);
Zhang Wei173acc72008-03-01 07:42:48 -07001019
1020 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
1021 if (!stat)
1022 return IRQ_NONE;
1023
1024 if (stat & FSL_DMA_SR_TE)
Ira Snyderb1584712011-03-03 07:54:55 +00001025 chan_err(chan, "Transfer Error!\n");
Zhang Wei173acc72008-03-01 07:42:48 -07001026
Ira Snyder9c3a50b2010-01-06 13:34:06 +00001027 /*
1028 * Programming Error
Zhang Weif79abb62008-03-18 18:45:00 -07001029 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
1030 * triger a PE interrupt.
1031 */
1032 if (stat & FSL_DMA_SR_PE) {
Ira Snyderb1584712011-03-03 07:54:55 +00001033 chan_dbg(chan, "irq: Programming Error INT\n");
Ira Snydera1c03312010-01-06 13:34:05 +00001034 if (get_bcr(chan) == 0) {
Zhang Weif79abb62008-03-18 18:45:00 -07001035 /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
1036 * Now, update the completed cookie, and continue the
1037 * next uncompleted transfer.
1038 */
Zhang Wei1c629792008-04-17 20:17:25 -07001039 update_cookie = 1;
1040 xfer_ld_q = 1;
Zhang Weif79abb62008-03-18 18:45:00 -07001041 }
1042 stat &= ~FSL_DMA_SR_PE;
1043 }
1044
Ira Snyder9c3a50b2010-01-06 13:34:06 +00001045 /*
1046 * If the link descriptor segment transfer finishes,
Zhang Wei173acc72008-03-01 07:42:48 -07001047 * we will recycle the used descriptor.
1048 */
1049 if (stat & FSL_DMA_SR_EOSI) {
Ira Snyderb1584712011-03-03 07:54:55 +00001050 chan_dbg(chan, "irq: End-of-segments INT\n");
1051 chan_dbg(chan, "irq: clndar 0x%llx, nlndar 0x%llx\n",
Ira Snydera1c03312010-01-06 13:34:05 +00001052 (unsigned long long)get_cdar(chan),
1053 (unsigned long long)get_ndar(chan));
Zhang Wei173acc72008-03-01 07:42:48 -07001054 stat &= ~FSL_DMA_SR_EOSI;
Zhang Wei1c629792008-04-17 20:17:25 -07001055 update_cookie = 1;
1056 }
1057
Ira Snyder9c3a50b2010-01-06 13:34:06 +00001058 /*
1059 * For MPC8349, EOCDI event need to update cookie
Zhang Wei1c629792008-04-17 20:17:25 -07001060 * and start the next transfer if it exist.
1061 */
1062 if (stat & FSL_DMA_SR_EOCDI) {
Ira Snyderb1584712011-03-03 07:54:55 +00001063 chan_dbg(chan, "irq: End-of-Chain link INT\n");
Zhang Wei1c629792008-04-17 20:17:25 -07001064 stat &= ~FSL_DMA_SR_EOCDI;
1065 update_cookie = 1;
1066 xfer_ld_q = 1;
Zhang Wei173acc72008-03-01 07:42:48 -07001067 }
1068
Ira Snyder9c3a50b2010-01-06 13:34:06 +00001069 /*
1070 * If it current transfer is the end-of-transfer,
Zhang Wei173acc72008-03-01 07:42:48 -07001071 * we should clear the Channel Start bit for
1072 * prepare next transfer.
1073 */
Zhang Wei1c629792008-04-17 20:17:25 -07001074 if (stat & FSL_DMA_SR_EOLNI) {
Ira Snyderb1584712011-03-03 07:54:55 +00001075 chan_dbg(chan, "irq: End-of-link INT\n");
Zhang Wei173acc72008-03-01 07:42:48 -07001076 stat &= ~FSL_DMA_SR_EOLNI;
Zhang Wei1c629792008-04-17 20:17:25 -07001077 xfer_ld_q = 1;
Zhang Wei173acc72008-03-01 07:42:48 -07001078 }
1079
Zhang Wei1c629792008-04-17 20:17:25 -07001080 if (update_cookie)
Ira Snydera1c03312010-01-06 13:34:05 +00001081 fsl_dma_update_completed_cookie(chan);
Zhang Wei1c629792008-04-17 20:17:25 -07001082 if (xfer_ld_q)
Ira Snydera1c03312010-01-06 13:34:05 +00001083 fsl_chan_xfer_ld_queue(chan);
Zhang Wei173acc72008-03-01 07:42:48 -07001084 if (stat)
Ira Snyderb1584712011-03-03 07:54:55 +00001085 chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat);
Zhang Wei173acc72008-03-01 07:42:48 -07001086
Ira Snyderb1584712011-03-03 07:54:55 +00001087 chan_dbg(chan, "irq: Exit\n");
Ira Snydera1c03312010-01-06 13:34:05 +00001088 tasklet_schedule(&chan->tasklet);
Zhang Wei173acc72008-03-01 07:42:48 -07001089 return IRQ_HANDLED;
1090}
1091
Zhang Wei173acc72008-03-01 07:42:48 -07001092static void dma_do_tasklet(unsigned long data)
1093{
Ira Snydera1c03312010-01-06 13:34:05 +00001094 struct fsldma_chan *chan = (struct fsldma_chan *)data;
1095 fsl_chan_ld_cleanup(chan);
Zhang Wei173acc72008-03-01 07:42:48 -07001096}
1097
Ira Snyderd3f620b2010-01-06 13:34:04 +00001098static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
1099{
1100 struct fsldma_device *fdev = data;
1101 struct fsldma_chan *chan;
1102 unsigned int handled = 0;
1103 u32 gsr, mask;
1104 int i;
1105
1106 gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
1107 : in_le32(fdev->regs);
1108 mask = 0xff000000;
1109 dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
1110
1111 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1112 chan = fdev->chan[i];
1113 if (!chan)
1114 continue;
1115
1116 if (gsr & mask) {
1117 dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
1118 fsldma_chan_irq(irq, chan);
1119 handled++;
1120 }
1121
1122 gsr &= ~mask;
1123 mask >>= 8;
1124 }
1125
1126 return IRQ_RETVAL(handled);
1127}
1128
1129static void fsldma_free_irqs(struct fsldma_device *fdev)
1130{
1131 struct fsldma_chan *chan;
1132 int i;
1133
1134 if (fdev->irq != NO_IRQ) {
1135 dev_dbg(fdev->dev, "free per-controller IRQ\n");
1136 free_irq(fdev->irq, fdev);
1137 return;
1138 }
1139
1140 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1141 chan = fdev->chan[i];
1142 if (chan && chan->irq != NO_IRQ) {
Ira Snyderb1584712011-03-03 07:54:55 +00001143 chan_dbg(chan, "free per-channel IRQ\n");
Ira Snyderd3f620b2010-01-06 13:34:04 +00001144 free_irq(chan->irq, chan);
1145 }
1146 }
1147}
1148
1149static int fsldma_request_irqs(struct fsldma_device *fdev)
1150{
1151 struct fsldma_chan *chan;
1152 int ret;
1153 int i;
1154
1155 /* if we have a per-controller IRQ, use that */
1156 if (fdev->irq != NO_IRQ) {
1157 dev_dbg(fdev->dev, "request per-controller IRQ\n");
1158 ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
1159 "fsldma-controller", fdev);
1160 return ret;
1161 }
1162
1163 /* no per-controller IRQ, use the per-channel IRQs */
1164 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1165 chan = fdev->chan[i];
1166 if (!chan)
1167 continue;
1168
1169 if (chan->irq == NO_IRQ) {
Ira Snyderb1584712011-03-03 07:54:55 +00001170 chan_err(chan, "interrupts property missing in device tree\n");
Ira Snyderd3f620b2010-01-06 13:34:04 +00001171 ret = -ENODEV;
1172 goto out_unwind;
1173 }
1174
Ira Snyderb1584712011-03-03 07:54:55 +00001175 chan_dbg(chan, "request per-channel IRQ\n");
Ira Snyderd3f620b2010-01-06 13:34:04 +00001176 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
1177 "fsldma-chan", chan);
1178 if (ret) {
Ira Snyderb1584712011-03-03 07:54:55 +00001179 chan_err(chan, "unable to request per-channel IRQ\n");
Ira Snyderd3f620b2010-01-06 13:34:04 +00001180 goto out_unwind;
1181 }
1182 }
1183
1184 return 0;
1185
1186out_unwind:
1187 for (/* none */; i >= 0; i--) {
1188 chan = fdev->chan[i];
1189 if (!chan)
1190 continue;
1191
1192 if (chan->irq == NO_IRQ)
1193 continue;
1194
1195 free_irq(chan->irq, chan);
1196 }
1197
1198 return ret;
1199}
1200
Ira Snydera4f56d42010-01-06 13:34:01 +00001201/*----------------------------------------------------------------------------*/
1202/* OpenFirmware Subsystem */
1203/*----------------------------------------------------------------------------*/
1204
/**
 * fsl_dma_chan_probe - set up a single DMA channel from its OF node
 * @fdev: parent DMA controller device
 * @node: device tree node for the channel
 * @feature: FSL_DMA_IP_* and endianness feature flags for this channel
 * @compatible: compatible string, used only for the info printout
 *
 * Allocates the fsldma_chan, maps its registers, derives the channel id
 * from the register offset, wires up the feature-specific callbacks,
 * and registers the channel with the dmaengine device list.
 * Returns 0 on success or a negative errno.
 */
static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	/* channel id is encoded in the register block offset */
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fallthrough: 85XX channels also get all 83XX callbacks */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}
1297
Ira Snydera1c03312010-01-06 13:34:05 +00001298static void fsl_dma_chan_remove(struct fsldma_chan *chan)
Zhang Wei173acc72008-03-01 07:42:48 -07001299{
Ira Snydera1c03312010-01-06 13:34:05 +00001300 irq_dispose_mapping(chan->irq);
1301 list_del(&chan->common.device_node);
1302 iounmap(chan->regs);
1303 kfree(chan);
Zhang Wei173acc72008-03-01 07:42:48 -07001304}
1305
Grant Likely2dc11582010-08-06 09:25:50 -06001306static int __devinit fsldma_of_probe(struct platform_device *op,
Zhang Wei173acc72008-03-01 07:42:48 -07001307 const struct of_device_id *match)
1308{
Ira Snydera4f56d42010-01-06 13:34:01 +00001309 struct fsldma_device *fdev;
Timur Tabi77cd62e2008-09-26 17:00:11 -07001310 struct device_node *child;
Ira Snydere7a29152010-01-06 13:34:03 +00001311 int err;
Zhang Wei173acc72008-03-01 07:42:48 -07001312
Ira Snydera4f56d42010-01-06 13:34:01 +00001313 fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
Zhang Wei173acc72008-03-01 07:42:48 -07001314 if (!fdev) {
Ira Snydere7a29152010-01-06 13:34:03 +00001315 dev_err(&op->dev, "No enough memory for 'priv'\n");
1316 err = -ENOMEM;
1317 goto out_return;
Zhang Wei173acc72008-03-01 07:42:48 -07001318 }
Ira Snydere7a29152010-01-06 13:34:03 +00001319
1320 fdev->dev = &op->dev;
Zhang Wei173acc72008-03-01 07:42:48 -07001321 INIT_LIST_HEAD(&fdev->common.channels);
1322
Ira Snydere7a29152010-01-06 13:34:03 +00001323 /* ioremap the registers for use */
Grant Likely61c7a082010-04-13 16:12:29 -07001324 fdev->regs = of_iomap(op->dev.of_node, 0);
Ira Snydere7a29152010-01-06 13:34:03 +00001325 if (!fdev->regs) {
1326 dev_err(&op->dev, "unable to ioremap registers\n");
1327 err = -ENOMEM;
1328 goto out_free_fdev;
Zhang Wei173acc72008-03-01 07:42:48 -07001329 }
1330
Ira Snyderd3f620b2010-01-06 13:34:04 +00001331 /* map the channel IRQ if it exists, but don't hookup the handler yet */
Grant Likely61c7a082010-04-13 16:12:29 -07001332 fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
Ira Snyderd3f620b2010-01-06 13:34:04 +00001333
Zhang Wei173acc72008-03-01 07:42:48 -07001334 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1335 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
Ira Snyderc14330412010-09-30 11:46:45 +00001336 dma_cap_set(DMA_SG, fdev->common.cap_mask);
Ira Snyderbbea0b62009-09-08 17:53:04 -07001337 dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
Zhang Wei173acc72008-03-01 07:42:48 -07001338 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
1339 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
Zhang Wei2187c262008-03-13 17:45:28 -07001340 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
Zhang Wei173acc72008-03-01 07:42:48 -07001341 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
Ira Snyderc14330412010-09-30 11:46:45 +00001342 fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
Linus Walleij07934482010-03-26 16:50:49 -07001343 fdev->common.device_tx_status = fsl_tx_status;
Zhang Wei173acc72008-03-01 07:42:48 -07001344 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
Ira Snyderbbea0b62009-09-08 17:53:04 -07001345 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
Linus Walleijc3635c72010-03-26 16:44:01 -07001346 fdev->common.device_control = fsl_dma_device_control;
Ira Snydere7a29152010-01-06 13:34:03 +00001347 fdev->common.dev = &op->dev;
Zhang Wei173acc72008-03-01 07:42:48 -07001348
Li Yange2c8e4252010-11-11 20:16:29 +08001349 dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1350
Ira Snydere7a29152010-01-06 13:34:03 +00001351 dev_set_drvdata(&op->dev, fdev);
Timur Tabi77cd62e2008-09-26 17:00:11 -07001352
Ira Snydere7a29152010-01-06 13:34:03 +00001353 /*
1354 * We cannot use of_platform_bus_probe() because there is no
1355 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
Timur Tabi77cd62e2008-09-26 17:00:11 -07001356 * channel object.
1357 */
Grant Likely61c7a082010-04-13 16:12:29 -07001358 for_each_child_of_node(op->dev.of_node, child) {
Ira Snydere7a29152010-01-06 13:34:03 +00001359 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
Timur Tabi77cd62e2008-09-26 17:00:11 -07001360 fsl_dma_chan_probe(fdev, child,
1361 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
1362 "fsl,eloplus-dma-channel");
Ira Snydere7a29152010-01-06 13:34:03 +00001363 }
1364
1365 if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
Timur Tabi77cd62e2008-09-26 17:00:11 -07001366 fsl_dma_chan_probe(fdev, child,
1367 FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
1368 "fsl,elo-dma-channel");
Ira Snydere7a29152010-01-06 13:34:03 +00001369 }
Timur Tabi77cd62e2008-09-26 17:00:11 -07001370 }
Zhang Wei173acc72008-03-01 07:42:48 -07001371
Ira Snyderd3f620b2010-01-06 13:34:04 +00001372 /*
1373 * Hookup the IRQ handler(s)
1374 *
1375 * If we have a per-controller interrupt, we prefer that to the
1376 * per-channel interrupts to reduce the number of shared interrupt
1377 * handlers on the same IRQ line
1378 */
1379 err = fsldma_request_irqs(fdev);
1380 if (err) {
1381 dev_err(fdev->dev, "unable to request IRQs\n");
1382 goto out_free_fdev;
1383 }
1384
Zhang Wei173acc72008-03-01 07:42:48 -07001385 dma_async_device_register(&fdev->common);
1386 return 0;
1387
Ira Snydere7a29152010-01-06 13:34:03 +00001388out_free_fdev:
Ira Snyderd3f620b2010-01-06 13:34:04 +00001389 irq_dispose_mapping(fdev->irq);
Zhang Wei173acc72008-03-01 07:42:48 -07001390 kfree(fdev);
Ira Snydere7a29152010-01-06 13:34:03 +00001391out_return:
Zhang Wei173acc72008-03-01 07:42:48 -07001392 return err;
1393}
1394
Grant Likely2dc11582010-08-06 09:25:50 -06001395static int fsldma_of_remove(struct platform_device *op)
Timur Tabi77cd62e2008-09-26 17:00:11 -07001396{
Ira Snydera4f56d42010-01-06 13:34:01 +00001397 struct fsldma_device *fdev;
Timur Tabi77cd62e2008-09-26 17:00:11 -07001398 unsigned int i;
1399
Ira Snydere7a29152010-01-06 13:34:03 +00001400 fdev = dev_get_drvdata(&op->dev);
Timur Tabi77cd62e2008-09-26 17:00:11 -07001401 dma_async_device_unregister(&fdev->common);
1402
Ira Snyderd3f620b2010-01-06 13:34:04 +00001403 fsldma_free_irqs(fdev);
1404
Ira Snydere7a29152010-01-06 13:34:03 +00001405 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
Timur Tabi77cd62e2008-09-26 17:00:11 -07001406 if (fdev->chan[i])
1407 fsl_dma_chan_remove(fdev->chan[i]);
Ira Snydere7a29152010-01-06 13:34:03 +00001408 }
Timur Tabi77cd62e2008-09-26 17:00:11 -07001409
Ira Snydere7a29152010-01-06 13:34:03 +00001410 iounmap(fdev->regs);
1411 dev_set_drvdata(&op->dev, NULL);
Timur Tabi77cd62e2008-09-26 17:00:11 -07001412 kfree(fdev);
Timur Tabi77cd62e2008-09-26 17:00:11 -07001413
1414 return 0;
1415}
1416
Márton Németh4b1cf1f2010-02-02 23:41:06 -07001417static const struct of_device_id fsldma_of_ids[] = {
Kumar Gala049c9d42008-03-31 11:13:21 -05001418 { .compatible = "fsl,eloplus-dma", },
1419 { .compatible = "fsl,elo-dma", },
Zhang Wei173acc72008-03-01 07:42:48 -07001420 {}
1421};
1422
Ira Snydera4f56d42010-01-06 13:34:01 +00001423static struct of_platform_driver fsldma_of_driver = {
Grant Likely40182942010-04-13 16:13:02 -07001424 .driver = {
1425 .name = "fsl-elo-dma",
1426 .owner = THIS_MODULE,
1427 .of_match_table = fsldma_of_ids,
1428 },
1429 .probe = fsldma_of_probe,
1430 .remove = fsldma_of_remove,
Zhang Wei173acc72008-03-01 07:42:48 -07001431};
1432
Ira Snydera4f56d42010-01-06 13:34:01 +00001433/*----------------------------------------------------------------------------*/
1434/* Module Init / Exit */
1435/*----------------------------------------------------------------------------*/
1436
1437static __init int fsldma_init(void)
Zhang Wei173acc72008-03-01 07:42:48 -07001438{
Timur Tabi77cd62e2008-09-26 17:00:11 -07001439 int ret;
1440
1441 pr_info("Freescale Elo / Elo Plus DMA driver\n");
1442
Ira Snydera4f56d42010-01-06 13:34:01 +00001443 ret = of_register_platform_driver(&fsldma_of_driver);
Timur Tabi77cd62e2008-09-26 17:00:11 -07001444 if (ret)
1445 pr_err("fsldma: failed to register platform driver\n");
1446
1447 return ret;
Zhang Wei173acc72008-03-01 07:42:48 -07001448}
1449
Ira Snydera4f56d42010-01-06 13:34:01 +00001450static void __exit fsldma_exit(void)
Timur Tabi77cd62e2008-09-26 17:00:11 -07001451{
Ira Snydera4f56d42010-01-06 13:34:01 +00001452 of_unregister_platform_driver(&fsldma_of_driver);
Timur Tabi77cd62e2008-09-26 17:00:11 -07001453}
1454
Ira Snydera4f56d42010-01-06 13:34:01 +00001455subsys_initcall(fsldma_init);
1456module_exit(fsldma_exit);
Timur Tabi77cd62e2008-09-26 17:00:11 -07001457
1458MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
1459MODULE_LICENSE("GPL");