/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>
#include "fsldma.h"

static void dma_init(struct fsldma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsldma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan)
{
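	/* set_cdar() ORs FSL_DMA_SNEN into the low bits of CDAR; mask it
	 * off here to recover the plain descriptor address.
	 */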
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsldma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
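	/* Idle means the channel-busy (CB) bit is clear or the
	 * channel-halted (CH) bit is set.
	 */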
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *fsl_chan)
{
	u32 mode;

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

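	/* 85XX only: enable external master pause (EMP_EN) when the channel
	 * is configured for external pause control; clearing the byte count
	 * register first appears to arm the pause mechanism (this reading is
	 * inferred from the code, not from a hardware manual).
	 */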
	if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *fsl_chan)
{
	u32 mode;
	int i;

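	/* Request a channel abort (CA) to stop any transfer in flight,
	 * then clear the start/abort bits so the channel is left stopped.
	 */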
	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(fsl_chan))
			break;
		udelay(10);
	}

	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

static void append_ld_queue(struct fsldma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link the last link descriptor in the queue (the previous node's
	 * next link descriptor) to the new descriptor's physical address,
	 * and enable the End-of-segment interrupt on it.
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold transfer size. The source address hold or
 * loop transfer size is how many bytes the DMA reads from the source
 * address (SA) before looping: if the loop size is 4, the DMA will
 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
 * SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
{
	u32 mode;

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold transfer size. The destination address
 * hold or loop transfer size is how many bytes the DMA writes to the
 * destination address (TA) before looping: if the loop size is 4, the DMA
 * will write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
{
	u32 mode;

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @fsl_chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
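	/* Program log2(size) into MR bits 24-27; the mask keeps the value
	 * inside that 4-bit field.
	 */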
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the
 * transfer immediately. The DMA channel will wait for the
 * control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
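	/* Give every descriptor in the chain its own cookie, wrapping back
	 * to 1 on signed overflow; the last cookie assigned becomes the
	 * channel's current cookie.
	 */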
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	fsl_chan->common.cookie = cookie;
	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		INIT_LIST_HEAD(&desc_sw->tx_list);
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptor must be aligned to 32 bytes to meet the
	 * FSL DMA specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsldma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

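		/* A single hardware descriptor can move at most
		 * FSL_DMA_BCR_MAX_CNT bytes, so split the request into
		 * maximum-sized chunks.
		 */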
		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dst(fsl_chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct fsldma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	struct list_head *tx_list;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

	if (!chan)
		return NULL;

	if (!chan->private)
		return NULL;

	fsl_chan = to_fsl_chan(chan);
	slave = chan->private;

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from DMA pool */
			new = fsl_dma_alloc_descriptor(fsl_chan);
			if (!new) {
				dev_err(fsl_chan->dev, "No free memory for "
						       "link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
			dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
			set_desc_cnt(fsl_chan, &new->hw, copy);
			set_desc_src(fsl_chan, &new->hw, dma_src);
			set_desc_dst(fsl_chan, &new->hw, dma_dst);

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
				set_desc_next(fsl_chan, &prev->hw,
					      new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	/* Enable extra controller features */
	if (fsl_chan->set_src_loop_size)
		fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);

	if (fsl_chan->set_dst_loop_size)
		fsl_chan->set_dst_loop_size(fsl_chan, slave->dst_loop_size);

	if (fsl_chan->toggle_ext_start)
		fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);

	if (fsl_chan->toggle_ext_pause)
		fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);

	if (fsl_chan->set_request_count)
		fsl_chan->set_request_count(fsl_chan, slave->request_count);

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
	tx_list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
		list_del_init(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
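
/*
 * Illustrative DMA_SLAVE usage for the function above (a sketch, not code
 * taken from this driver: it assumes a caller that has built a
 * struct fsl_dma_slave from <asm/fsldma.h> with at least one address/length
 * pair queued on slave->addresses, and a DMA-mapped scatterlist):
 *
 *	chan->private = slave;
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						  DMA_TO_DEVICE, 0);
 *	if (desc)
 *		desc->tx_submit(desc);
 *	chan->device->device_issue_pending(chan);
 */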

static void fsl_dma_device_terminate_all(struct dma_chan *chan)
{
	struct fsldma_chan *fsl_chan;
	struct fsl_desc_sw *desc, *tmp;
	unsigned long flags;

	if (!chan)
		return;

	fsl_chan = to_fsl_chan(chan);

	/* Halt the DMA engine */
	dma_halt(fsl_chan);

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Remove and free all of the descriptors in the LD queue */
	list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
		list_del(&desc->node);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

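	/* CDAR points at the descriptor the hardware is working on (or
	 * last worked on). If the channel is idle that descriptor has
	 * completed; otherwise only its predecessor has.
	 */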
	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: it frees each
 * completed link descriptor and runs its callback, if any.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dst_addr;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	if (!dma_is_idle(fsl_chan))
		goto out_unlock;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred, we need to start the channel on them.
	 */

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dst_addr);
		set_cdar(fsl_chan, next_dst_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsldma_chan *fsl_chan = data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(fsl_chan),
			(unsigned long long)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	int ch_nr;
	u32 gsr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
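	/* The general status register carries one byte per channel, with
	 * channel 0 in the most significant byte; the position of the
	 * first set bit selects the channel.
	 */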
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *new_fsl_chan;
	struct resource res;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(*new_fsl_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(res.start, resource_size(&res));
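	/* Channel register blocks are assumed to start at offset 0x100 and
	 * to be 0x80 bytes apart, so the channel index can be recovered
	 * from the low bits of the register address.
	 */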
	new_fsl_chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no %d channel!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
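		/* fall through: 85XX channels also get the 83XX callbacks below */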
	case FSL_DMA_IP_83XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		new_fsl_chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
					"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
		 compatible,
		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}

static int __devinit fsldma_of_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsldma_device *fdev;
	struct device_node *child;
	struct resource res;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &res);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at 0x%llx...\n",
			match->compatible, (unsigned long long)res.start);
	fdev->reg_base = ioremap(res.start, resource_size(&res));

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
					"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}

static int fsldma_of_remove(struct of_device *of_dev)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}

static struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.name		= "fsl-elo-dma",
	.match_table	= fsldma_of_ids,
	.probe		= fsldma_of_probe,
	.remove		= fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");