/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({					\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;				\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;				\
		u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?		\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
		u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?		\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
									\
		(DWC_CTLL_DST_MSIZE(_dmsize)				\
		 | DWC_CTLL_SRC_MSIZE(_smsize)				\
		 | DWC_CTLL_LLP_D_EN					\
		 | DWC_CTLL_LLP_S_EN					\
		 | DWC_CTLL_DMS(_dms)					\
		 | DWC_CTLL_SMS(_sms));					\
	})

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

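/*
 * Add the descriptor to the channel's software queue; the hardware is
 * not touched here. Transfers are actually started from
 * dwc_issue_pending() or from the completion path.
 */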
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}

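/*
 * Allocate a hardware descriptor from the controller's DMA pool and
 * initialize its dmaengine bookkeeping. Returns NULL when the pool is
 * exhausted; the preparation functions treat that as a soft failure.
 */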
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;
	return desc;
}

static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
{
	u32 cfghi = 0;
	u32 cfglo = 0;

	/* Set default burst alignment */
	cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;

	/* Low 4 bits of the request lines */
	cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
	cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);

	/* Request line extension (2 bits) */
	cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
	cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);
}

static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
{
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
	bool hs_polarity = dwc->dws.hs_polarity;

	cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
	cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);

	/* Set polarity of handshake interface */
	cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
		return;

	if (dw->pdata->is_idma32)
		dwc_initialize_chan_idma32(dwc);
	else
		dwc_initialize_chan_dw(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

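/*
 * Split a transfer at the channel's maximum block size: the return
 * value is what gets programmed into CTL_HI.BLOCK_TS, and *len receives
 * the number of bytes actually covered. iDMA 32-bit counts the block
 * size in bytes, so the data width is ignored there.
 */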
static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes,
		       unsigned int width, size_t *len)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 block;

	/* Always in bytes for iDMA 32-bit */
	if (dw->pdata->is_idma32)
		width = 0;

	if ((bytes >> width) > dwc->block_size) {
		block = dwc->block_size;
		*len = block << width;
	} else {
		block = bytes >> width;
		*len = bytes;
	}

	return block;
}

static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	if (dw->pdata->is_idma32)
		return IDMA32C_CTLH_BLOCK_TS(block);

	return DWC_CTLH_BLOCK_TS(block) << width;
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u8		lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long	was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

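/*
 * Move the first queued descriptor to the active list and start it.
 * Called with dwc->lock held and interrupts disabled.
 */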
static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

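/*
 * Complete a single descriptor: mark its cookie as complete, ack the
 * whole chain, return it to the pool, and invoke the client callback
 * (if requested) after the channel lock has been dropped.
 */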
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;
	struct dmaengine_desc_callback	cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}

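/*
 * Compare the LLP register against each LLI in the active list to find
 * out where the hardware currently is: everything before that point is
 * completed, and the in-progress descriptor has its residue reduced by
 * the number of bytes already sent for the current block.
 */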
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptor's addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptor's llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

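/*
 * Handle a channel error interrupt: drop the broken descriptor from the
 * head of the active list, restart the channel with whatever work
 * remains, and report the offending LLI chain.
 */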
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

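/*
 * Bottom half: read the raw XFER/ERROR status and dispatch each flagged
 * channel (errors to dwc_handle_error(), completions to
 * dwc_scan_descriptors()), then re-enable the masks the interrupt
 * handler cleared.
 */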
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

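/*
 * Top half: mask all channel interrupts and defer the real work to the
 * tasklet. Safe on shared interrupt lines: returns IRQ_NONE when the
 * controller is not in use or reports no pending status.
 */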
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

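/*
 * Prepare a memory-to-memory transfer: pick the widest transfer width
 * allowed by the master data bus and the alignment of source,
 * destination and length, then build an LLI chain where each entry
 * covers at most one hardware block.
 */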
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	u8			m_master = dwc->dws.m_master;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	u32			ctllo;
	u8			lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count));
		desc->len = xfer_count;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

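/*
 * Prepare a slave transfer from a scatterlist. The peripheral side uses
 * the address and register width from dma_slave_config, while the
 * memory side width is chosen per segment; segments larger than the
 * hardware block size are split across several LLIs.
 */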
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	u8			m_master = dwc->dws.m_master;
	u8			lms = DWC_LLP_LMS(m_master);
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, mem;
			size_t		dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen));
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, mem;
			size_t		dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen));
			mem_width = __ffs(data_width | mem | dlen);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);

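/*
 * dmaengine device_config callback. The maxburst conversion below turns
 * item counts into register encodings, e.g. on DesignWare (s == 2) a
 * maxburst of 8 becomes fls(8) - 2 = 2, the encoding for an 8-item
 * burst.
 */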
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dma_slave_config *sc = &dwc->dma_sconfig;
	struct dw_dma *dw = to_dw_dma(chan->device);
	/*
	 * Fix sconfig's burst size according to dw_dmac. We need to convert
	 * them as:
	 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
	 *
	 * NOTE: burst size 2 is not supported by DesignWare controller.
	 *       iDMA 32-bit supports it.
	 */
	u32 s = dw->pdata->is_idma32 ? 1 : 2;

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
	sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;

	return 0;
}

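/*
 * Suspend the channel and busy-wait (at most 20 x 2 us) for its FIFO to
 * become empty. On iDMA 32-bit the drain flag additionally requests a
 * FIFO drain, which the terminate path uses.
 */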
static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int count = 20;	/* timeout iterations */
	u32 cfglo;

	cfglo = channel_readl(dwc, CFG_LO);
	if (dw->pdata->is_idma32) {
		if (drain)
			cfglo |= IDMA32C_CFGL_CH_DRAIN;
		else
			cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
	}
	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc_chan_pause(dwc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

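/*
 * dmaengine device_terminate_all callback: pause with FIFO drain,
 * disable the channel, and complete every active and queued descriptor
 * without invoking client callbacks.
 */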
static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_pause(dwc, true);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

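/*
 * The residue is only exact for the first active descriptor, where it
 * is maintained by dwc_scan_descriptors(); descriptors further down the
 * list have not started yet, so their full length is reported.
 */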
static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/

/*
 * Program FIFO size of channels.
 *
 * By default the full FIFO (1024 bytes) is assigned to channel 0. Here
 * we slice the FIFO into equal parts between the channels.
 */
static void idma32_fifo_partition(struct dw_dma *dw)
{
	u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
		    IDMA32C_FP_UPDATE;
	u64 fifo_partition = 0;

	if (!dw->pdata->is_idma32)
		return;

	/* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
	fifo_partition |= value << 0;

	/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
	fifo_partition |= value << 32;

	/* Program FIFO Partition registers - 128 bytes for each channel */
	idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
	idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
}

static void dw_dma_off(struct dw_dma *dw)
{
	unsigned int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}

static void dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}

Dan Williamsaa1e6f12009-01-06 11:38:17 -07001114static int dwc_alloc_chan_resources(struct dma_chan *chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001115{
1116 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1117 struct dw_dma *dw = to_dw_dma(chan->device);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001118
Andy Shevchenko2e4c3642012-06-19 13:34:05 +03001119 dev_vdbg(chan2dev(chan), "%s\n", __func__);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001120
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001121 /* ASSERT: channel is idle */
1122 if (dma_readl(dw, CH_EN) & dwc->mask) {
Dan Williams41d5e592009-01-06 11:38:21 -07001123 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001124 return -EIO;
1125 }
1126
Russell King - ARM Linuxd3ee98cdc2012-03-06 22:35:47 +00001127 dma_cookie_init(chan);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001128
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001129 /*
1130 * NOTE: some controllers may have additional features that we
1131 * need to initialize here, like "scatter-gather" (which
1132 * doesn't mean what you think it means), and status writeback.
1133 */
1134
Andy Shevchenko3fe64092016-04-08 16:22:17 +03001135 /*
1136 * We need controller-specific data to set up slave transfers.
1137 */
1138 if (chan->private && !dw_dma_filter(chan, chan->private)) {
1139 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1140 return -EINVAL;
1141 }
1142
Andy Shevchenko99d9bf42014-09-23 17:18:14 +03001143 /* Enable controller here if needed */
1144 if (!dw->in_use)
1145 dw_dma_on(dw);
1146 dw->in_use |= dwc->mask;
1147
Christian Lamparterab703f82016-04-14 18:11:01 +02001148 return 0;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001149}
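
/*
 * A hedged usage sketch (illustration only, not part of the driver): a
 * client that owns a struct dw_dma_slave may request a channel through
 * the legacy filter interface; dw_dma_filter() then stores the passed
 * controller-specific data that the checks above rely on. The function
 * name is made up, and dws->dma_dev is expected to point at this
 * controller's struct device.
 */
static struct dma_chan * __maybe_unused
example_request_dw_channel(struct dw_dma_slave *dws)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* On a match the filter copies *dws into the channel state */
	return dma_request_channel(mask, dw_dma_filter, dws);
}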

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was the last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

int dw_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma_platform_data *pdata;
	struct dw_dma *dw;
	bool autocfg = false;
	unsigned int dw_params;
	unsigned int i;
	int err;

	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;
	chip->dw = dw;

	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			err = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->is_private = true;
		pdata->is_memcpy = true;
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate the all-channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force DMA off, just in case */
	dw_dma_off(dw);

	idma32_fifo_partition(dw);

	/* Device and instance ID for IRQ and DMA pool */
	if (pdata->is_idma32)
		snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id);
	else
		snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
				dwc_params);

			/*
			 * Decode the maximum block size for the given
			 * channel. The stored 4-bit value represents block
			 * sizes from 3 (0x0) up to 4095 (0xa).
			 */
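			/*
			 * Worked example (added for clarity): 0x0 decodes to
			 * (4 << 0) - 1 = 3 and 0xa to (4 << 10) - 1 = 4095,
			 * matching the computation below.
			 */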
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	if (pdata->is_memcpy)
		dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);
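
/*
 * A hedged sketch (illustration only, not part of the driver): how bus
 * glue typically hands the controller to this core. Field names follow
 * struct dw_dma_chip; the function name and the zero instance ID are
 * assumptions.
 */
static int __maybe_unused example_glue_probe(struct device *dev,
					     void __iomem *regs, int irq)
{
	struct dw_dma_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->regs = regs;
	chip->irq = irq;
	chip->id = 0;		/* instance ID, used in dw->name above */
	chip->pdata = NULL;	/* NULL requests autoconfiguration from DW_PARAMS */

	return dw_dma_probe(chip);
}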

int dw_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);

int dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_off(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_disable);

int dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	idma32_fifo_partition(dw);

	dw_dma_on(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_enable);
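
/*
 * A hedged sketch (illustration only): bus glue typically wires the pair
 * above into its system sleep callbacks, so the controller is quiesced
 * across suspend and the FIFO partitioning is reprogrammed on resume.
 * The function names are made up.
 */
static int __maybe_unused example_glue_suspend(struct dw_dma_chip *chip)
{
	return dw_dma_disable(chip);
}

static int __maybe_unused example_glue_resume(struct dw_dma_chip *chip)
{
	return dw_dma_enable(chip);
}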

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");