/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller"
 * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
{
	return slave ? slave->dst_master : 0;
}

static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
{
	return slave ? slave->src_master : 1;
}

static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	unsigned char mmax = dw->nr_masters - 1;

	if (dwc->request_line == ~0) {
		dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
		dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
	}
}

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;				\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;				\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dwc->dst_master)		\
		 | DWC_CTLL_SMS(_dwc->src_master));		\
	})

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	} else {
		if (dwc->direction == DMA_MEM_TO_DEV)
			cfghi = DWC_CFGH_DST_PER(dwc->request_line);
		else if (dwc->direction == DMA_DEV_TO_MEM)
			cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
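
/*
 * Despite the "fls" in its name, the helper above returns the alignment
 * of `v` as a shift count (its number of trailing zero bits, capped at
 * 3). For example, dwc_fast_fls(0x1002) == 1 (2-byte aligned) and
 * dwc_fast_fls(0x3040) == 3 (at least 8-byte aligned). Callers OR
 * together source address, destination address and length, so the
 * result is the widest transfer width that is safe for the whole
 * buffer.
 */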

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to
	 * continue multi block transfer.
	 */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
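
/*
 * Controllers synthesized without LLP (linked-list) support can only do
 * one block per CH_EN, so the driver keeps the descriptor chain in
 * software (tx_node_active) and relaunches the channel from the XFER
 * interrupt one block at a time; see the DW_DMA_IS_SOFT_LLP handling in
 * dwc_dostart() and dwc_scan_descriptors() below.
 */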

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!is_slave_direction(dwc->direction)) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
					desc->total_len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
					desc->total_len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
					desc->total_len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
					desc->total_len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}
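
/*
 * The hardware counts completed source transactions in
 * CTL_HI.BLOCK_TS, and bits 6:4 of CTL_LO hold SRC_TR_WIDTH, the
 * source transfer width encoded as log2(bytes). For example, a
 * BLOCK_TS of 16 with a 32-bit source width (encoding 2) means
 * 16 * (1 << 2) = 64 bytes have left the source so far.
 */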

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update desc to reflect last sent one */
				if (active != head->next)
					desc = to_dw_desc(active->prev);

				dwc->residue -= desc->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		dwc->residue = 0;

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		dwc->residue = 0;
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		dwc->residue = desc->total_len;

		/* Check first descriptor's addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptor's llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			dwc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		dwc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.llp == llp) {
				/* Currently in progress */
				dwc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			dwc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       " cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
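
/*
 * A cyclic client typically polls these two helpers from its period
 * callback to locate the controller inside the ring buffer. A sketch
 * for a hypothetical capture (DMA_DEV_TO_MEM) driver, where names like
 * my_dev and dma_buf_phys are illustrative only:
 *
 *	static void my_period_callback(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		dma_addr_t pos = dw_dma_get_dst_addr(dev->chan);
 *
 *		dev->hw_ptr = pos - dev->dma_buf_phys;
 *	}
 */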
572
Andy Shevchenko75c61222013-03-26 16:53:54 +0200573/* Called with dwc->lock held and all DMAC interrupts disabled */
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200574static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530575 u32 status_err, u32 status_xfer)
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200576{
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530577 unsigned long flags;
578
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530579 if (dwc->mask) {
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200580 void (*callback)(void *param);
581 void *callback_param;
582
583 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
584 channel_readl(dwc, LLP));
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200585
586 callback = dwc->cdesc->period_callback;
587 callback_param = dwc->cdesc->period_callback_param;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530588
589 if (callback)
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200590 callback(callback_param);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200591 }
592
593 /*
594 * Error and transfer complete are highly unlikely, and will most
595 * likely be due to a configuration error by the user.
596 */
597 if (unlikely(status_err & dwc->mask) ||
598 unlikely(status_xfer & dwc->mask)) {
599 int i;
600
601 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
602 "interrupt, stopping DMA transfer\n",
603 status_xfer ? "xfer" : "error");
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530604
605 spin_lock_irqsave(&dwc->lock, flags);
606
Andy Shevchenko1d455432012-06-19 13:34:03 +0300607 dwc_dump_chan_regs(dwc);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200608
Andy Shevchenko3f9362072012-06-19 13:46:32 +0300609 dwc_chan_disable(dw, dwc);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200610
Andy Shevchenko75c61222013-03-26 16:53:54 +0200611 /* Make sure DMA does not restart by loading a new list */
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200612 channel_writel(dwc, LLP, 0);
613 channel_writel(dwc, CTL_LO, 0);
614 channel_writel(dwc, CTL_HI, 0);
615
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200616 dma_writel(dw, CLEAR.ERROR, dwc->mask);
617 dma_writel(dw, CLEAR.XFER, dwc->mask);
618
619 for (i = 0; i < dwc->cdesc->periods; i++)
620 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530621
622 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200623 }
624}
625
626/* ------------------------------------------------------------------------- */
627
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700628static void dw_dma_tasklet(unsigned long data)
629{
630 struct dw_dma *dw = (struct dw_dma *)data;
631 struct dw_dma_chan *dwc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700632 u32 status_xfer;
633 u32 status_err;
634 int i;
635
Haavard Skinnemoen7fe7b2f2008-10-03 15:23:46 -0700636 status_xfer = dma_readl(dw, RAW.XFER);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700637 status_err = dma_readl(dw, RAW.ERROR);
638
Andy Shevchenko2e4c3642012-06-19 13:34:05 +0300639 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700640
641 for (i = 0; i < dw->dma.chancnt; i++) {
642 dwc = &dw->chan[i];
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200643 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530644 dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +0200645 else if (status_err & (1 << i))
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700646 dwc_handle_error(dw, dwc);
Andy Shevchenko77bcc4972013-01-18 14:14:15 +0200647 else if (status_xfer & (1 << i))
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700648 dwc_scan_descriptors(dw, dwc);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700649 }
650
651 /*
Viresh Kumarff7b05f2012-02-01 16:12:23 +0530652 * Re-enable interrupts.
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700653 */
654 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700655 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
656}
657
658static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
659{
660 struct dw_dma *dw = dev_id;
661 u32 status;
662
Andy Shevchenko2e4c3642012-06-19 13:34:05 +0300663 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700664 dma_readl(dw, STATUS_INT));
665
666 /*
667 * Just disable the interrupts. We'll turn them back on in the
668 * softirq handler.
669 */
670 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700671 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
672
673 status = dma_readl(dw, STATUS_INT);
674 if (status) {
675 dev_err(dw->dma.dev,
676 "BUG: Unexpected interrupts pending: 0x%x\n",
677 status);
678
679 /* Try to recover */
680 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700681 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
682 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
683 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
684 }
685
686 tasklet_schedule(&dw->tasklet);
687
688 return IRQ_HANDLED;
689}
690
691/*----------------------------------------------------------------------*/
692
693static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
694{
695 struct dw_desc *desc = txd_to_dw_desc(tx);
696 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
697 dma_cookie_t cookie;
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530698 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700699
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530700 spin_lock_irqsave(&dwc->lock, flags);
Russell King - ARM Linux884485e2012-03-06 22:34:46 +0000701 cookie = dma_cookie_assign(tx);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700702
703 /*
704 * REVISIT: We should attempt to chain as many descriptors as
705 * possible, perhaps even appending to those already submitted
706 * for DMA. But this is hard to do in a race-free manner.
707 */
708 if (list_empty(&dwc->active_list)) {
Andy Shevchenko2e4c3642012-06-19 13:34:05 +0300709 dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700710 desc->txd.cookie);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700711 list_add_tail(&desc->desc_node, &dwc->active_list);
Viresh Kumarf336e422011-03-03 15:47:16 +0530712 dwc_dostart(dwc, dwc_first_active(dwc));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700713 } else {
Andy Shevchenko2e4c3642012-06-19 13:34:05 +0300714 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700715 desc->txd.cookie);
716
717 list_add_tail(&desc->desc_node, &dwc->queue);
718 }
719
Viresh Kumar69cea5a2011-04-15 16:03:35 +0530720 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700721
722 return cookie;
723}
724
725static struct dma_async_tx_descriptor *
726dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
727 size_t len, unsigned long flags)
728{
729 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Arnd Bergmannf7760762013-03-26 16:53:57 +0200730 struct dw_dma *dw = to_dw_dma(chan->device);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700731 struct dw_desc *desc;
732 struct dw_desc *first;
733 struct dw_desc *prev;
734 size_t xfer_count;
735 size_t offset;
736 unsigned int src_width;
737 unsigned int dst_width;
Andy Shevchenko3d4f8602012-10-01 13:06:25 +0300738 unsigned int data_width;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700739 u32 ctllo;
740
Andy Shevchenko2f45d612012-06-19 13:34:02 +0300741 dev_vdbg(chan2dev(chan),
Andy Shevchenko2e4c3642012-06-19 13:34:05 +0300742 "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
Andy Shevchenko2f45d612012-06-19 13:34:02 +0300743 (unsigned long long)dest, (unsigned long long)src,
744 len, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700745
746 if (unlikely(!len)) {
Andy Shevchenko2e4c3642012-06-19 13:34:05 +0300747 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700748 return NULL;
749 }
750
Andy Shevchenko0fdb5672013-01-10 10:53:03 +0200751 dwc->direction = DMA_MEM_TO_MEM;
752
Arnd Bergmannf7760762013-03-26 16:53:57 +0200753 data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
754 dw->data_width[dwc->dst_master]);
Andy Shevchenkoa0982002012-09-21 15:05:48 +0300755
Andy Shevchenko3d4f8602012-10-01 13:06:25 +0300756 src_width = dst_width = min_t(unsigned int, data_width,
757 dwc_fast_fls(src | dest | len));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700758
Viresh Kumar327e6972012-02-01 16:12:26 +0530759 ctllo = DWC_DEFAULT_CTLLO(chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700760 | DWC_CTLL_DST_WIDTH(dst_width)
761 | DWC_CTLL_SRC_WIDTH(src_width)
762 | DWC_CTLL_DST_INC
763 | DWC_CTLL_SRC_INC
764 | DWC_CTLL_FC_M2M;
765 prev = first = NULL;
766
767 for (offset = 0; offset < len; offset += xfer_count << src_width) {
768 xfer_count = min_t(size_t, (len - offset) >> src_width,
Andy Shevchenko4a63a8b2012-09-21 15:05:47 +0300769 dwc->block_size);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700770
771 desc = dwc_desc_get(dwc);
772 if (!desc)
773 goto err_desc_get;
774
775 desc->lli.sar = src + offset;
776 desc->lli.dar = dest + offset;
777 desc->lli.ctllo = ctllo;
778 desc->lli.ctlhi = xfer_count;
Andy Shevchenko176dcec2013-01-25 11:48:02 +0200779 desc->len = xfer_count << src_width;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700780
781 if (!first) {
782 first = desc;
783 } else {
784 prev->lli.llp = desc->txd.phys;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700785 list_add_tail(&desc->desc_node,
Dan Williamse0bd0f82009-09-08 17:53:02 -0700786 &first->tx_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700787 }
788 prev = desc;
789 }
790
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700791 if (flags & DMA_PREP_INTERRUPT)
792 /* Trigger interrupt after last block */
793 prev->lli.ctllo |= DWC_CTLL_INT_EN;
794
795 prev->lli.llp = 0;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700796 first->txd.flags = flags;
Andy Shevchenko30d38a32013-01-25 11:48:01 +0200797 first->total_len = len;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700798
799 return &first->txd;
800
801err_desc_get:
802 dwc_desc_put(dwc, first);
803 return NULL;
804}
805
806static struct dma_async_tx_descriptor *
807dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
Vinod Kouldb8196d2011-10-13 22:34:23 +0530808 unsigned int sg_len, enum dma_transfer_direction direction,
Alexandre Bounine185ecb52012-03-08 15:35:13 -0500809 unsigned long flags, void *context)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700810{
811 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Arnd Bergmannf7760762013-03-26 16:53:57 +0200812 struct dw_dma *dw = to_dw_dma(chan->device);
Viresh Kumar327e6972012-02-01 16:12:26 +0530813 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700814 struct dw_desc *prev;
815 struct dw_desc *first;
816 u32 ctllo;
817 dma_addr_t reg;
818 unsigned int reg_width;
819 unsigned int mem_width;
Andy Shevchenkoa0982002012-09-21 15:05:48 +0300820 unsigned int data_width;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700821 unsigned int i;
822 struct scatterlist *sg;
823 size_t total_len = 0;
824
Andy Shevchenko2e4c3642012-06-19 13:34:05 +0300825 dev_vdbg(chan2dev(chan), "%s\n", __func__);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700826
Andy Shevchenko495aea42013-01-10 11:11:41 +0200827 if (unlikely(!is_slave_direction(direction) || !sg_len))
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700828 return NULL;
829
Andy Shevchenko0fdb5672013-01-10 10:53:03 +0200830 dwc->direction = direction;
831
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700832 prev = first = NULL;
833
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700834 switch (direction) {
Vinod Kouldb8196d2011-10-13 22:34:23 +0530835 case DMA_MEM_TO_DEV:
Viresh Kumar327e6972012-02-01 16:12:26 +0530836 reg_width = __fls(sconfig->dst_addr_width);
837 reg = sconfig->dst_addr;
838 ctllo = (DWC_DEFAULT_CTLLO(chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700839 | DWC_CTLL_DST_WIDTH(reg_width)
840 | DWC_CTLL_DST_FIX
Viresh Kumar327e6972012-02-01 16:12:26 +0530841 | DWC_CTLL_SRC_INC);
842
843 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
844 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
845
Arnd Bergmannf7760762013-03-26 16:53:57 +0200846 data_width = dw->data_width[dwc->src_master];
Andy Shevchenkoa0982002012-09-21 15:05:48 +0300847
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700848 for_each_sg(sgl, sg, sg_len, i) {
849 struct dw_desc *desc;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530850 u32 len, dlen, mem;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700851
Lars-Peter Clausencbb796c2012-04-25 20:50:51 +0200852 mem = sg_dma_address(sg);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700853 len = sg_dma_len(sg);
Viresh Kumar6bc711f2012-02-01 16:12:25 +0530854
Andy Shevchenkoa0982002012-09-21 15:05:48 +0300855 mem_width = min_t(unsigned int,
856 data_width, dwc_fast_fls(mem | len));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700857
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530858slave_sg_todev_fill_desc:
859 desc = dwc_desc_get(dwc);
860 if (!desc) {
861 dev_err(chan2dev(chan),
862 "not enough descriptors available\n");
863 goto err_desc_get;
864 }
865
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700866 desc->lli.sar = mem;
867 desc->lli.dar = reg;
868 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
Andy Shevchenko4a63a8b2012-09-21 15:05:47 +0300869 if ((len >> mem_width) > dwc->block_size) {
870 dlen = dwc->block_size << mem_width;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530871 mem += dlen;
872 len -= dlen;
873 } else {
874 dlen = len;
875 len = 0;
876 }
877
878 desc->lli.ctlhi = dlen >> mem_width;
Andy Shevchenko176dcec2013-01-25 11:48:02 +0200879 desc->len = dlen;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700880
881 if (!first) {
882 first = desc;
883 } else {
884 prev->lli.llp = desc->txd.phys;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700885 list_add_tail(&desc->desc_node,
Dan Williamse0bd0f82009-09-08 17:53:02 -0700886 &first->tx_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700887 }
888 prev = desc;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530889 total_len += dlen;
890
891 if (len)
892 goto slave_sg_todev_fill_desc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700893 }
894 break;
Vinod Kouldb8196d2011-10-13 22:34:23 +0530895 case DMA_DEV_TO_MEM:
Viresh Kumar327e6972012-02-01 16:12:26 +0530896 reg_width = __fls(sconfig->src_addr_width);
897 reg = sconfig->src_addr;
898 ctllo = (DWC_DEFAULT_CTLLO(chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700899 | DWC_CTLL_SRC_WIDTH(reg_width)
900 | DWC_CTLL_DST_INC
Viresh Kumar327e6972012-02-01 16:12:26 +0530901 | DWC_CTLL_SRC_FIX);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700902
Viresh Kumar327e6972012-02-01 16:12:26 +0530903 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
904 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
905
Arnd Bergmannf7760762013-03-26 16:53:57 +0200906 data_width = dw->data_width[dwc->dst_master];
Andy Shevchenkoa0982002012-09-21 15:05:48 +0300907
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700908 for_each_sg(sgl, sg, sg_len, i) {
909 struct dw_desc *desc;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530910 u32 len, dlen, mem;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700911
Lars-Peter Clausencbb796c2012-04-25 20:50:51 +0200912 mem = sg_dma_address(sg);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700913 len = sg_dma_len(sg);
Viresh Kumar6bc711f2012-02-01 16:12:25 +0530914
Andy Shevchenkoa0982002012-09-21 15:05:48 +0300915 mem_width = min_t(unsigned int,
916 data_width, dwc_fast_fls(mem | len));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700917
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530918slave_sg_fromdev_fill_desc:
919 desc = dwc_desc_get(dwc);
920 if (!desc) {
921 dev_err(chan2dev(chan),
922 "not enough descriptors available\n");
923 goto err_desc_get;
924 }
925
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700926 desc->lli.sar = reg;
927 desc->lli.dar = mem;
928 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
Andy Shevchenko4a63a8b2012-09-21 15:05:47 +0300929 if ((len >> reg_width) > dwc->block_size) {
930 dlen = dwc->block_size << reg_width;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530931 mem += dlen;
932 len -= dlen;
933 } else {
934 dlen = len;
935 len = 0;
936 }
937 desc->lli.ctlhi = dlen >> reg_width;
Andy Shevchenko176dcec2013-01-25 11:48:02 +0200938 desc->len = dlen;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700939
940 if (!first) {
941 first = desc;
942 } else {
943 prev->lli.llp = desc->txd.phys;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700944 list_add_tail(&desc->desc_node,
Dan Williamse0bd0f82009-09-08 17:53:02 -0700945 &first->tx_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700946 }
947 prev = desc;
Viresh Kumar69dc14b2011-04-18 14:54:56 +0530948 total_len += dlen;
949
950 if (len)
951 goto slave_sg_fromdev_fill_desc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700952 }
953 break;
954 default:
955 return NULL;
956 }
957
958 if (flags & DMA_PREP_INTERRUPT)
959 /* Trigger interrupt after last block */
960 prev->lli.ctllo |= DWC_CTLL_INT_EN;
961
962 prev->lli.llp = 0;
Andy Shevchenko30d38a32013-01-25 11:48:01 +0200963 first->total_len = total_len;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -0700964
965 return &first->txd;
966
967err_desc_get:
968 dwc_desc_put(dwc, first);
969 return NULL;
970}
971
Viresh Kumar327e6972012-02-01 16:12:26 +0530972/*
973 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
974 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
975 *
976 * NOTE: burst size 2 is not supported by controller.
977 *
978 * This can be done by finding least significant bit set: n & (n - 1)
979 */
980static inline void convert_burst(u32 *maxburst)
981{
982 if (*maxburst > 1)
983 *maxburst = fls(*maxburst) - 2;
984 else
985 *maxburst = 0;
986}
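
/*
 * Worked example: a maxburst of 4 becomes fls(4) - 2 == 3 - 2 == 1,
 * giving the mapping 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3 above; a maxburst
 * of 1 (or an unset 0) takes the else branch and encodes as 0.
 */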

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	/* Take the request line from the slave_id member */
	if (dwc->request_line == ~0)
		dwc->request_line = sconfig->slave_id;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);
	unsigned int count = 20;	/* timeout iterations */

	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	dwc->paused = true;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	dwc->paused = false;
}

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		dwc_chan_pause(dwc);

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_chan_resume(dwc);

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

		dwc_chan_disable(dw, dwc);

		dwc_chan_resume(dwc);

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
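
/*
 * Clients do not invoke dwc_control() directly; it backs the generic
 * dmaengine wrappers. A sketch of typical slave-channel usage, where
 * the fifo_phys address is illustrative only:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	// DMA_SLAVE_CONFIG
 *	dmaengine_pause(chan);			// DMA_PAUSE
 *	dmaengine_resume(chan);			// DMA_RESUME
 *	dmaengine_terminate_all(chan);		// DMA_TERMINATE_ALL
 */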
1081
Andy Shevchenko4702d522013-01-25 11:48:03 +02001082static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
1083{
1084 unsigned long flags;
1085 u32 residue;
1086
1087 spin_lock_irqsave(&dwc->lock, flags);
1088
1089 residue = dwc->residue;
1090 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1091 residue -= dwc_get_sent(dwc);
1092
1093 spin_unlock_irqrestore(&dwc->lock, flags);
1094 return residue;
1095}
1096
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001097static enum dma_status
Linus Walleij07934482010-03-26 16:50:49 -07001098dwc_tx_status(struct dma_chan *chan,
1099 dma_cookie_t cookie,
1100 struct dma_tx_state *txstate)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001101{
1102 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +00001103 enum dma_status ret;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001104
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +00001105 ret = dma_cookie_status(chan, cookie, txstate);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001106 if (ret != DMA_SUCCESS) {
1107 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1108
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +00001109 ret = dma_cookie_status(chan, cookie, txstate);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001110 }
1111
Viresh Kumarabf53902011-04-15 16:03:35 +05301112 if (ret != DMA_SUCCESS)
Andy Shevchenko4702d522013-01-25 11:48:03 +02001113 dma_set_residue(txstate, dwc_get_residue(dwc));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001114
Linus Walleija7c57cf2011-04-19 08:31:32 +08001115 if (dwc->paused)
1116 return DMA_PAUSED;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001117
1118 return ret;
1119}
1120
1121static void dwc_issue_pending(struct dma_chan *chan)
1122{
1123 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1124
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001125 if (!list_empty(&dwc->queue))
1126 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001127}
1128
Dan Williamsaa1e6f12009-01-06 11:38:17 -07001129static int dwc_alloc_chan_resources(struct dma_chan *chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001130{
1131 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1132 struct dw_dma *dw = to_dw_dma(chan->device);
1133 struct dw_desc *desc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001134 int i;
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301135 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001136
Andy Shevchenko2e4c3642012-06-19 13:34:05 +03001137 dev_vdbg(chan2dev(chan), "%s\n", __func__);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001138
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001139 /* ASSERT: channel is idle */
1140 if (dma_readl(dw, CH_EN) & dwc->mask) {
Dan Williams41d5e592009-01-06 11:38:21 -07001141 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001142 return -EIO;
1143 }
1144
Russell King - ARM Linuxd3ee98cdc2012-03-06 22:35:47 +00001145 dma_cookie_init(chan);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001146
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001147 /*
1148 * NOTE: some controllers may have additional features that we
1149 * need to initialize here, like "scatter-gather" (which
1150 * doesn't mean what you think it means), and status writeback.
1151 */
1152
Arnd Bergmannf7760762013-03-26 16:53:57 +02001153 dwc_set_masters(dwc);
1154
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301155 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001156 i = dwc->descs_allocated;
1157 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
Andy Shevchenkof8122a82013-01-16 15:48:50 +02001158 dma_addr_t phys;
1159
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301160 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001161
Andy Shevchenkof8122a82013-01-16 15:48:50 +02001162 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
Andy Shevchenkocbd65312013-01-09 10:17:11 +02001163 if (!desc)
1164 goto err_desc_alloc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001165
Andy Shevchenkof8122a82013-01-16 15:48:50 +02001166 memset(desc, 0, sizeof(struct dw_desc));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001167
Dan Williamse0bd0f82009-09-08 17:53:02 -07001168 INIT_LIST_HEAD(&desc->tx_list);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001169 dma_async_tx_descriptor_init(&desc->txd, chan);
1170 desc->txd.tx_submit = dwc_tx_submit;
1171 desc->txd.flags = DMA_CTRL_ACK;
Andy Shevchenkof8122a82013-01-16 15:48:50 +02001172 desc->txd.phys = phys;
Andy Shevchenkocbd65312013-01-09 10:17:11 +02001173
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001174 dwc_desc_put(dwc, desc);
1175
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301176 spin_lock_irqsave(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001177 i = ++dwc->descs_allocated;
1178 }
1179
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301180 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001181
Andy Shevchenko2e4c3642012-06-19 13:34:05 +03001182 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001183
1184 return i;
Andy Shevchenkocbd65312013-01-09 10:17:11 +02001185
1186err_desc_alloc:
Andy Shevchenkocbd65312013-01-09 10:17:11 +02001187 dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
1188
1189 return i;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001190}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;
	dwc->request_line = ~0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/*----------------------------------------------------------------------*/

struct dw_dma_of_filter_args {
	struct dw_dma *dw;
	unsigned int req;
	unsigned int src;
	unsigned int dst;
};

static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_of_filter_args *fargs = param;

	/* Ensure the device matches our channel */
	if (chan->device != &fargs->dw->dma)
		return false;

	dwc->request_line = fargs->req;
	dwc->src_master = fargs->src;
	dwc->dst_master = fargs->dst;

	return true;
}

static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_of_filter_args fargs = {
		.dw = dw,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	fargs.req = dma_spec->args[0];
	fargs.src = dma_spec->args[1];
	fargs.dst = dma_spec->args[2];

	if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
		    fargs.src >= dw->nr_masters ||
		    fargs.dst >= dw->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_of_filter, &fargs);
}
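
/*
 * Example (hypothetical client node): the three cells consumed by
 * dw_dma_of_xlate() are the request line and the source/destination
 * masters, so a slave device might reference this controller as
 *
 *	dmas = <&dmahost 12 0 1>;
 *	dma-names = "rx";
 *
 * where the label and cell values are purely illustrative.
 */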

#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct acpi_dma_spec *dma_spec = param;

	if (chan->device->dev != dma_spec->dev ||
	    chan->chan_id != dma_spec->chan_id)
		return false;

	dwc->request_line = dma_spec->slave_id;
	dwc->src_master = dwc_get_sms(NULL);
	dwc->dst_master = dwc_get_dms(NULL);

	return true;
}

static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */
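
/*
 * Example (illustrative, assuming an ACPI-enumerated slave carrying a
 * FixedDMA descriptor): with the controller registered above, a client
 * driver can simply call
 *
 *	chan = dma_request_slave_channel(dev, "tx");
 *
 * and acpi_dma_simple_xlate() routes the request through
 * dw_dma_acpi_filter() to select a matching channel.
 */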

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* Set up the DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			last->lli.llp = desc->txd.phys;

		last = desc;
	}

	/* Let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;

	dev_dbg(chan2dev(&dwc->chan),
			"cyclic prepared buf 0x%llx len %zu period %zu periods %d\n",
			(unsigned long long)buf_addr, buf_len, period_len,
			periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
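
/*
 * Example (sketch under assumed client code): the cyclic API above is
 * typically driven as
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 *
 * after the channel has been configured via dmaengine_slave_config();
 * buf_dma, buf_len and period_len here are the client's own.
 */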

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[4];

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
		return NULL;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32(np, "dma-masters", &tmp)) {
		if (tmp > 4)
			return NULL;

		pdata->nr_masters = tmp;
	}

	if (!of_property_read_u32_array(np, "data_width", arr,
				pdata->nr_masters))
		for (tmp = 0; tmp < pdata->nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif
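
/*
 * Example (hypothetical node matching the properties parsed above; all
 * values are illustrative only):
 *
 *	dmahost: dma@fc000000 {
 *		compatible = "snps,dma-spear1340";
 *		reg = <0xfc000000 0x1000>;
 *		interrupts = <0 12 0x4>;
 *		dma-channels = <8>;
 *		dma-masters = <2>;
 *		chan_allocation_order = <1>;
 *		chan_priority = <1>;
 *		block_size = <0xfff>;
 *		data_width = <3 3>;
 *	};
 */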

static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	void __iomem *regs;
	bool autocfg;
	unsigned int dw_params;
	unsigned int nr_channels;
	unsigned int max_blk_size = 0;
	int irq;
	int err;
	int i;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* Apply default dma_mask if needed */
	if (!pdev->dev.dma_mask) {
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

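	/*
	 * DW_PARAMS encodes the synthesis-time configuration of the
	 * controller; when parameter readback was enabled at synthesis,
	 * the probe can discover the channel count, number of masters
	 * and data widths from hardware instead of platform data.
	 */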
	dw_params = dma_read_byaddr(regs, DW_PARAMS);
	autocfg = dw_params >> DW_PARAMS_EN & 0x1;

	dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	if (!pdata && autocfg) {
		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;

		/* Fill platform data with the default values */
		pdata->is_private = true;
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	if (autocfg)
		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	else
		nr_channels = pdata->nr_channels;

	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk))
		return PTR_ERR(dw->clk);
	clk_prepare_enable(dw->clk);

	dw->regs = regs;

	/* Get hardware configuration parameters */
	if (autocfg) {
		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < dw->nr_masters; i++) {
			dw->data_width[i] =
				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
		}
	} else {
		dw->nr_masters = pdata->nr_masters;
		memcpy(dw->data_width, pdata->data_width, 4);
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << nr_channels) - 1;

	/* Force dma off, just in case */
	dw_dma_off(dw);

	/* Disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
			       "dw_dmac", dw);
	if (err)
		return err;

	platform_set_drvdata(pdev, dw);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];
		int r = nr_channels - i - 1;

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = r;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;
		dwc->request_line = ~0;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int dwc_params;

			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
						     DWC_PARAMS);

			dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
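			/*
			 * For example, a stored value of 0x0a yields
			 * (4 << 0xa) - 1 = 4095 and 0x00 yields
			 * (4 << 0) - 1 = 3.
			 */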
			dwc->block_size =
				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/*
			 * Check if the channel supports multi-block
			 * transfers: if the LLP register is hardwired to
			 * zero, the write below does not stick and LLP
			 * (linked-list) transfers are unavailable.
			 */
			channel_writel(dwc, LLP, 0xfffffffc);
			dwc->nollp =
				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
		 nr_channels);

	dma_async_device_register(&dw->dma);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(dw);

	return 0;
}

static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable_unprepare(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable_unprepare(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ }
};
#endif

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");