/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
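
/*
 * Usage sketch (illustrative only, not part of this driver): clients are
 * expected to drive this engine through the generic dmaengine API, roughly:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * where dst/src are assumed to be already DMA-mapped bus addresses and len
 * is the transfer size in bytes.
 */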

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

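/*
 * DMA_IN/DMA_OUT are the endian-aware register accessors declared in
 * fsldma.h; they select big- or little-endian accesses as needed so the
 * same code works on both 85xx and 83xx register layouts.
 */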
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

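/*
 * Mark @desc as the last link descriptor in a chain by setting the
 * end-of-links (EOL) bit in its hardware next-descriptor pointer. On
 * 83xx the snoop-enable bit must be kept set in the same field.
 */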
static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

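/*
 * The channel counts as idle when it is not busy (the SR channel-busy
 * bit is clear) or when it has been halted (the SR channel-halted bit
 * is set).
 */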
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA
 * transfers data from the source address (SA), it cycles within a
 * window of @size bytes: with a loop size of 4 it reads from SA,
 * SA + 1, SA + 2, SA + 3, then loops back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * transfers data to the destination address (TA), it cycles within a
 * window of @size bytes: with a loop size of 4 it writes to TA,
 * TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel is started by an external
 * DMA start pin, so dma_start() does not begin the transfer immediately.
 * The DMA channel waits for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

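/*
 * Append @desc (and all of its children) to the tail of the channel's
 * pending list, linking the new hardware descriptors onto the existing
 * hardware chain. Caller must hold chan->desc_lock.
 */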
static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

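/*
 * tx_submit implementation for the dmaengine API: assign cookies to the
 * software descriptors that make up this transaction and move them onto
 * the channel's pending queue. The hardware is not started here; that
 * happens in fsl_dma_memcpy_issue_pending().
 */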
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * for meeting FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		chan_dbg(chan, "LD %p free\n", desc);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
		chan_dbg(chan, "LD %p free\n", desc);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

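/*
 * Prepare a descriptor that transfers no data; it exists only to raise
 * an interrupt when it completes, implementing the DMA_INTERRUPT
 * capability.
 */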
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

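/*
 * Build a chain of link descriptors for a memory-to-memory copy. The
 * transfer is split into chunks of at most FSL_DMA_BCR_MAX_CNT bytes,
 * one hardware descriptor per chunk.
 */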
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
		    dma_addr_t dma_dst, dma_addr_t dma_src,
		    size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

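/*
 * Copy between two scatterlists whose entries need not line up: each
 * iteration emits the largest hardware descriptor that fits within the
 * current source entry, the current destination entry, and the
 * controller's FSL_DMA_BCR_MAX_CNT per-descriptor limit, then advances
 * whichever list ran dry.
 */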
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {
		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {
			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {
			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: DMAEngine flags
 * @context: transaction context (ignored)
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_MEM_TO_DEV)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}

/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;

	/* Run the link descriptor callback function */
	if (txd->callback) {
		chan_dbg(chan, "LD %p callback\n", desc);
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	dma_descriptor_unmap(txd);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                          */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

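/*
 * Deferred work scheduled by fsldma_chan_irq(): record the last completed
 * cookie, move the finished descriptors off ld_running, restart the
 * controller if more work is pending, and only then run the descriptor
 * callbacks with the channel lock dropped.
 */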
static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}

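/*
 * Per-controller interrupt handler: read the global status register and
 * demultiplex it to the individual channel handlers, one byte of status
 * per channel starting from the most significant byte.
 */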
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                      */
/*----------------------------------------------------------------------------*/

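/*
 * Probe a single DMA channel node: map its registers, derive the channel
 * id from the register offset, wire up the feature-specific callbacks,
 * and register the channel with the dmaengine core.
 */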
static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
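		/* fall through: an 85xx channel also gets all 83xx operations */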
Zhang Wei173acc72008-03-01 07:42:48 -07001226 case FSL_DMA_IP_83XX:
Ira Snydera1c03312010-01-06 13:34:05 +00001227 chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1228 chan->set_src_loop_size = fsl_chan_set_src_loop_size;
1229 chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
1230 chan->set_request_count = fsl_chan_set_request_count;
Zhang Wei173acc72008-03-01 07:42:48 -07001231 }
1232
Ira Snydera1c03312010-01-06 13:34:05 +00001233 spin_lock_init(&chan->desc_lock);
Ira Snyder9c3a50b2010-01-06 13:34:06 +00001234 INIT_LIST_HEAD(&chan->ld_pending);
1235 INIT_LIST_HEAD(&chan->ld_running);
Ira Snyderf04cd402011-03-03 07:54:58 +00001236 chan->idle = true;
Zhang Wei173acc72008-03-01 07:42:48 -07001237
Ira Snydera1c03312010-01-06 13:34:05 +00001238 chan->common.device = &fdev->common;
Russell King - ARM Linux8ac69542012-03-06 22:36:27 +00001239 dma_cookie_init(&chan->common);
Zhang Wei173acc72008-03-01 07:42:48 -07001240
	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	/* make sure a queued tasklet can no longer run before freeing chan */
	tasklet_kill(&chan->tasklet);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

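	/* advertise DMA capabilities and hook up the dmaengine callbacks */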
	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

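	/* the engine supports 36-bit physical DMA addressing */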
	dma_set_mask(&op->dev, DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
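	/*
	 * An illustrative device tree layout this loop walks (node names,
	 * unit addresses and interrupt specifiers are examples only, not a
	 * binding definition):
	 *
	 *	dma@21300 {
	 *		compatible = "fsl,eloplus-dma";
	 *		reg = <0x21300 0x4>;
	 *		dma-channel@0 {
	 *			compatible = "fsl,eloplus-dma-channel";
	 *			reg = <0x0 0x80>;
	 *			interrupts = <20 2>;
	 *		};
	 *	};
	 */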
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s).
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts: it reduces the number of shared handlers
	 * on the same IRQ line.
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;
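/*
 * FIXME: reaching out_free_fdev after fsldma_request_irqs() fails leaks
 * the fdev->regs mapping and any channels already set up by
 * fsl_dma_chan_probe().
 */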
out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
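
/* allow userspace to autoload the module for matching device tree nodes */
MODULE_DEVICE_TABLE(of, fsldma_of_ids);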

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");