/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>
#include "fsldma.h"

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel into the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel into the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
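	/* idle: the channel is not busy (CB clear), or it has halted (CH set) */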
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
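	/* setting the channel abort (CA) bit terminates any transfer in progress */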
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

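	/* clear the start and abort bits so the channel stays stopped */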
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			break;
		udelay(10);
	}

	if (i >= 100 && !dma_is_idle(chan))
		dev_err(chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

static void append_ld_queue(struct fsldma_chan *chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(chan->ld_queue.prev);

	if (list_empty(&chan->ld_queue))
		return;

	/* Link to the new descriptor's physical address and enable the
	 * End-of-segment interrupt on the last link descriptor
	 * (the previous node's next link descriptor).
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Sets the source address hold (loop) transfer size. When the loop is
 * enabled, the DMA reads from a window of @size bytes at the source
 * address (SA): with a loop size of 4, the DMA reads from SA, SA + 1,
 * SA + 2, SA + 3, then loops back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
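
/*
 * Illustrative example (not part of the driver): with size = 4,
 * __ilog2(4) == 2, so the mode register gets FSL_DMA_MR_SAHE plus an
 * encoded hold-transfer size of (2 << 14), and the controller keeps
 * re-reading the 4-byte window at the source address for the whole
 * transfer. This is typically how a device FIFO mapped at a fixed
 * address is drained.
 */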

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Sets the destination address hold (loop) transfer size. When the loop
 * is enabled, the DMA writes to a window of @size bytes at the
 * destination address (TA): with a loop size of 4, the DMA writes to TA,
 * TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
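
/*
 * Illustrative example (not part of the driver): a request count of 1024
 * gives __ilog2(1024) == 10, so the value OR'd into bits 24-27 of the
 * mode register is (10 << 24) & 0x0f000000 == 0x0a000000; the channel
 * then pauses after each 1024-byte burst until DREQ# is asserted again.
 */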

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel is started by an external
 * DMA start pin, so dma_start() does not begin the transfer immediately;
 * the DMA channel waits for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&chan->desc_lock, flags);

	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;
	append_ld_queue(chan, desc);
	list_splice_init(&desc->tx_list, chan->ld_queue.prev);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		INIT_LIST_HEAD(&desc_sw->tx_list);
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/* The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
					  chan->dev, sizeof(struct fsl_desc_sw),
					  32, 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev,
			"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	dma_pool_destroy(chan->desc_pool);

	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		dev_err(chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
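
/*
 * Illustrative usage sketch (not part of the driver): a client that has
 * already obtained a channel with dma_request_channel() and DMA-mapped
 * its buffers would drive a copy roughly like this (error handling
 * omitted for brevity):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */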

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	struct list_head *tx_list;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

	if (!dchan)
		return NULL;

	if (!dchan->private)
		return NULL;

	chan = to_fsl_chan(dchan);
	slave = dchan->private;

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from the DMA pool */
			new = fsl_dma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "No free memory for "
						"link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
			dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
			set_desc_cnt(chan, &new->hw, copy);
			set_desc_src(chan, &new->hw, dma_src);
			set_desc_dst(chan, &new->hw, dma_dst);

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
				set_desc_next(chan, &prev->hw,
					      new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	/* Enable extra controller features */
	if (chan->set_src_loop_size)
		chan->set_src_loop_size(chan, slave->src_loop_size);

	if (chan->set_dst_loop_size)
		chan->set_dst_loop_size(chan, slave->dst_loop_size);

	if (chan->toggle_ext_start)
		chan->toggle_ext_start(chan, slave->external_start);

	if (chan->toggle_ext_pause)
		chan->toggle_ext_pause(chan, slave->external_pause);

	if (chan->set_request_count)
		chan->set_request_count(chan, slave->request_count);

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
	tx_list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
		list_del_init(&new->node);
		dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
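
/*
 * Illustrative usage sketch (not part of the driver), assuming the
 * fsl_dma_slave_alloc()/fsl_dma_slave_append() helpers from asm/fsldma.h:
 * a client describes the device-side address windows, hangs the result
 * off chan->private, and then calls device_prep_slave_sg() (error
 * handling omitted):
 *
 *	struct fsl_dma_slave *slave = fsl_dma_slave_alloc(GFP_KERNEL);
 *	struct dma_async_tx_descriptor *tx;
 *
 *	fsl_dma_slave_append(slave, fifo_phys_addr, transfer_len);
 *	slave->src_loop_size = 4;	(hold the device-side read address)
 *	chan->private = slave;
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						DMA_FROM_DEVICE, 0);
 *	tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */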

static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *desc, *tmp;
	unsigned long flags;

	if (!dchan)
		return;

	chan = to_fsl_chan(dchan);

	/* Halt the DMA engine */
	dma_halt(chan);

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* Remove and free all of the descriptors in the LD queue */
	list_for_each_entry_safe(desc, tmp, &chan->ld_queue, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(chan))
				chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: it runs the
 * callback of each completed link descriptor and frees the descriptor
 * back to the channel's DMA pool.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	dev_dbg(chan->dev, "chan completed_cookie = %d\n",
			chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			chan->completed_cookie, chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(chan->dev, "link descriptor %p will be recycled\n",
				desc);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&chan->desc_lock, flags);
			dev_dbg(chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
 * @chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dst_addr;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	if (!dma_is_idle(chan))
		goto out_unlock;

	dma_halt(chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred, we need to start them.
	 */

	/* Find the first untransferred descriptor */
	for (ld_node = chan->ld_queue.next;
		(ld_node != &chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				chan->completed_cookie,
				chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dst_addr);
		set_cdar(chan, next_dst_addr);
		dma_start(chan);
	} else {
		set_cdar(chan, 0);
		set_ndar(chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	if (list_empty(&chan->ld_queue)) {
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return;
	}

	dev_dbg(chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &chan->ld_queue, node) {
		int i;
		dev_dbg(chan->dev, "Ch %d, LD %08x\n",
				chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(chan->dev, "----------------\n");
	spin_unlock_irqrestore(&chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
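
/*
 * Illustrative note (not part of the driver): dmaengine clients normally
 * reach this through the dma_async_is_tx_complete() wrapper, for example:
 *
 *	dma_cookie_t done, used;
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, &done, &used);
 */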

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	stat = get_sr(chan);
	dev_dbg(chan->dev, "event: channel %d, stat = 0x%x\n",
						chan->id, stat);
	set_sr(chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(chan->dev, "event: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If a link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(chan->dev, "event: End-of-segments INT\n");
		dev_dbg(chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For the MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);
	if (stat)
		dev_dbg(chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(chan->dev, "event: Exit\n");
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(chan);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

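	/* one byte of status per channel, channel 0 in the most significant byte */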
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			dev_err(fdev->dev, "no interrupts property defined for "
					   "DMA channel %d. Please fix your "
					   "device tree\n", chan->id);
			ret = -ENODEV;
			goto out_unwind;
		}

		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			dev_err(fdev->dev, "unable to request IRQ for DMA "
					   "channel %d\n", chan->id);
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
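	/* each channel's registers occupy 0x80 bytes starting at offset 0x100,
	 * so recover the channel index from the 'reg' address
	 */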
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
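		/* fall through: the 85xx also supports the 83xx features */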
Zhang Wei173acc72008-03-01 07:42:48 -07001230 case FSL_DMA_IP_83XX:
Ira Snydera1c03312010-01-06 13:34:05 +00001231 chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1232 chan->set_src_loop_size = fsl_chan_set_src_loop_size;
1233 chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
1234 chan->set_request_count = fsl_chan_set_request_count;
Zhang Wei173acc72008-03-01 07:42:48 -07001235 }

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_queue);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct of_device *op,
	const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the controller IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->node, 0);

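	/*
	 * Advertise the controller's capabilities to the dmaengine core and
	 * fill in the operations it will call on this device.
	 */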
	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every
	 * DMA channel object. A failure to set up one channel is not treated
	 * as fatal for the controller as a whole.
	 */
	for_each_child_of_node(op->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s).
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line.
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct of_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

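/*
 * A rough sketch of the device tree layout these match entries expect; the
 * node names, unit addresses, and interrupt specifiers below are purely
 * illustrative, not taken from a real board file:
 *
 *	dma@21300 {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
 *		reg = <0x21300 0x4>;
 *		ranges = <0x0 0x21100 0x200>;
 *
 *		dma-channel@0 {
 *			compatible = "fsl,mpc8548-dma-channel",
 *				     "fsl,eloplus-dma-channel";
 *			reg = <0x0 0x80>;
 *			interrupts = <20 2>;
 *		};
 *	};
 */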
static struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.name = "fsl-elo-dma",
	.match_table = fsldma_of_ids,
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static int __init fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");