/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

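/*
 * Default CTL_LO value for a channel: maximum burst sizes for memory
 * transfers (or the slave-configured bursts for peripheral transfers),
 * block chaining (LLP) enabled on both ends, and the source/destination
 * AHB masters picked from the per-channel slave data.
 */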
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?	\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
		u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?	\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

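/*
 * dmaengine tx_submit hook: assign a cookie and park the descriptor on
 * the software queue. The hardware is only touched later, from
 * dwc_issue_pending() or the tasklet.
 */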
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}

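/* Allocate a zeroed descriptor from the channel's DMA pool (safe in atomic context) */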
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;
	return desc;
}

static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
		return;

	cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
	cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

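/*
 * Start a transfer on an idle channel: either hand the whole LLI chain
 * to the hardware (multi-block LLP mode) or, when the channel has no
 * multi-block support (dwc->nollp), emulate it in software one block at
 * a time via dwc_do_single_block().
 */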
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u8		lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback	callback = NULL;
	void			*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc		*child;
	unsigned long		flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}

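/*
 * Scan the active list against the channel's current LLP register to
 * find out which descriptor is in flight, complete everything that
 * finished before it and keep the residue of the running one up to date.
 */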
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

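/*
 * Handle an ERROR interrupt: drop the offending descriptor from the
 * active list, restart the channel with whatever is left, and complete
 * the bad descriptor as if it had succeeded, since dmaengine gives us no
 * way to report the failure.
 */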
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		unsigned int i;

		dev_err(chan2dev(&dwc->chan),
			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, dwc->cdesc->desc[i]);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}

/* ------------------------------------------------------------------------- */

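/*
 * Bottom half: latch the raw BLOCK/XFER/ERROR status, dispatch each
 * channel to the cyclic, error or descriptor-scan path, then re-enable
 * the interrupts that the hard IRQ handler masked.
 */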
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

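/*
 * Prepare a memory-to-memory transfer: the copy is cut into blocks of at
 * most dwc->block_size elements, each described by one LLI, and the LLIs
 * are chained through their llp fields.
 */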
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	u8			m_master = dwc->dws.m_master;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	u32			ctllo;
	u8			lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, xfer_count);
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

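/*
 * Prepare a slave transfer from a scatterlist: one LLI per scatterlist
 * entry, with entries larger than dwc->block_size split across several
 * LLIs. The peripheral register address and flow control come from the
 * slave configuration set via dwc_config().
 */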
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	u8			m_master = dwc->dws.m_master;
	u8			lms = DWC_LLP_LMS(m_master);
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			lli_write(desc, ctlhi, dlen >> mem_width);
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			lli_write(desc, ctlhi, dlen >> reg_width);
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

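/*
 * Usage sketch (not part of this driver; the field values below are
 * illustrative): a client that knows the DMA controller's struct device
 * and its request lines can grab a routed channel roughly like this,
 * where src_id/dst_id are the peripheral request lines and
 * m_master/p_master select the AHB masters used for memory and the
 * peripheral:
 *
 *	struct dw_dma_slave dws = {
 *		.dma_dev  = dmac_dev,
 *		.src_id   = 0,
 *		.dst_id   = 1,
 *		.m_master = 0,
 *		.p_master = 1,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &dws);
 */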
bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by the controller.
 *
 * This is done by finding the most significant bit set: for maxburst > 1
 * the encoded value is fls(maxburst) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}

static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;
	unsigned int		count = 20;	/* timeout iterations */
	u32			cfglo;

	spin_lock_irqsave(&dwc->lock, flags);

	cfglo = channel_readl(dwc, CFG_LO);
	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

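/*
 * Residue reporting: for the descriptor at the head of the active list
 * the residue tracked by dwc_scan_descriptors() is used (minus what the
 * current block has already sent in soft-LLP mode); any other queued
 * descriptor has not started yet, so its residue is its total length.
 */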
Andy Shevchenkob68fd092016-03-18 16:24:53 +02001011static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
Andy Shevchenko4702d522013-01-25 11:48:03 +02001012{
Andy Shevchenkob68fd092016-03-18 16:24:53 +02001013 struct dw_desc *desc;
1014
1015 list_for_each_entry(desc, &dwc->active_list, desc_node)
1016 if (desc->txd.cookie == c)
1017 return desc;
1018
1019 return NULL;
1020}
1021
1022static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
1023{
1024 struct dw_desc *desc;
Andy Shevchenko4702d522013-01-25 11:48:03 +02001025 unsigned long flags;
1026 u32 residue;
1027
1028 spin_lock_irqsave(&dwc->lock, flags);
1029
Andy Shevchenkob68fd092016-03-18 16:24:53 +02001030 desc = dwc_find_desc(dwc, cookie);
1031 if (desc) {
1032 if (desc == dwc_first_active(dwc)) {
1033 residue = desc->residue;
1034 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1035 residue -= dwc_get_sent(dwc);
1036 } else {
1037 residue = desc->total_len;
1038 }
1039 } else {
1040 residue = 0;
1041 }
Andy Shevchenko4702d522013-01-25 11:48:03 +02001042
1043 spin_unlock_irqrestore(&dwc->lock, flags);
1044 return residue;
1045}
1046
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001047static enum dma_status
Linus Walleij07934482010-03-26 16:50:49 -07001048dwc_tx_status(struct dma_chan *chan,
1049 dma_cookie_t cookie,
1050 struct dma_tx_state *txstate)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001051{
1052 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +00001053 enum dma_status ret;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001054
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +00001055 ret = dma_cookie_status(chan, cookie, txstate);
Vinod Koul2c404102013-10-16 13:41:15 +05301056 if (ret == DMA_COMPLETE)
Andy Shevchenko12381dc2013-07-15 15:04:40 +03001057 return ret;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001058
Andy Shevchenko12381dc2013-07-15 15:04:40 +03001059 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001060
Andy Shevchenko12381dc2013-07-15 15:04:40 +03001061 ret = dma_cookie_status(chan, cookie, txstate);
Andy Shevchenkob68fd092016-03-18 16:24:53 +02001062 if (ret == DMA_COMPLETE)
1063 return ret;
1064
1065 dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001066
Andy Shevchenko5e09f982016-03-18 16:24:51 +02001067 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
Linus Walleija7c57cf2011-04-19 08:31:32 +08001068 return DMA_PAUSED;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001069
1070 return ret;
1071}
1072
1073static void dwc_issue_pending(struct dma_chan *chan)
1074{
1075 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Andy Shevchenkodd8ecfca2014-06-18 12:15:38 +03001076 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001077
Andy Shevchenkodd8ecfca2014-06-18 12:15:38 +03001078 spin_lock_irqsave(&dwc->lock, flags);
1079 if (list_empty(&dwc->active_list))
1080 dwc_dostart_first_queued(dwc);
1081 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001082}
1083
Andy Shevchenko99d9bf42014-09-23 17:18:14 +03001084/*----------------------------------------------------------------------*/
1085
1086static void dw_dma_off(struct dw_dma *dw)
1087{
Andy Shevchenko7794e5b2016-03-18 16:24:48 +02001088 unsigned int i;
Andy Shevchenko99d9bf42014-09-23 17:18:14 +03001089
1090 dma_writel(dw, CFG, 0);
1091
1092 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
Mans Rullgard2895b2c2016-01-11 13:04:29 +00001093 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
Andy Shevchenko99d9bf42014-09-23 17:18:14 +03001094 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1095 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1096 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1097
1098 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1099 cpu_relax();
1100
1101 for (i = 0; i < dw->dma.chancnt; i++)
Andy Shevchenko423f9cb2016-03-18 16:24:52 +02001102 clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
Andy Shevchenko99d9bf42014-09-23 17:18:14 +03001103}
1104
1105static void dw_dma_on(struct dw_dma *dw)
1106{
1107 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1108}
1109
Dan Williamsaa1e6f12009-01-06 11:38:17 -07001110static int dwc_alloc_chan_resources(struct dma_chan *chan)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001111{
1112 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1113 struct dw_dma *dw = to_dw_dma(chan->device);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001114
Andy Shevchenko2e4c3642012-06-19 13:34:05 +03001115 dev_vdbg(chan2dev(chan), "%s\n", __func__);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001116
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001117 /* ASSERT: channel is idle */
1118 if (dma_readl(dw, CH_EN) & dwc->mask) {
Dan Williams41d5e592009-01-06 11:38:21 -07001119 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001120 return -EIO;
1121 }
1122
Russell King - ARM Linuxd3ee98cdc2012-03-06 22:35:47 +00001123 dma_cookie_init(chan);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001124
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001125 /*
1126 * NOTE: some controllers may have additional features that we
1127 * need to initialize here, like "scatter-gather" (which
1128 * doesn't mean what you think it means), and status writeback.
1129 */
1130
Andy Shevchenko3fe64092016-04-08 16:22:17 +03001131 /*
1132 * We need controller-specific data to set up slave transfers.
1133 */
1134 if (chan->private && !dw_dma_filter(chan, chan->private)) {
1135 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1136 return -EINVAL;
1137 }
1138
Andy Shevchenko99d9bf42014-09-23 17:18:14 +03001139 /* Enable controller here if needed */
1140 if (!dw->in_use)
1141 dw_dma_on(dw);
1142 dw->in_use |= dwc->mask;
1143
Christian Lamparterab703f82016-04-14 18:11:01 +02001144 return 0;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001145}
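
/*
 * Illustrative sketch, not part of this driver: the legacy (non-DT/ACPI)
 * way a peripheral driver hands controller-specific data to
 * dwc_alloc_chan_resources() above.  A client filter stores a struct
 * dw_dma_slave (from <linux/platform_data/dma-dw.h>) in chan->private,
 * which is then validated by dw_dma_filter().  The request IDs and master
 * numbers below are made up; keep the structure alive at least until
 * dma_request_channel() returns.
 */
#if 0	/* example only */
static bool my_dw_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = param;

	/* Only accept channels belonging to the controller we were given. */
	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}

static struct dma_chan *my_request_chan(struct device *dmac_dev)
{
	struct dw_dma_slave dws = {
		.dma_dev = dmac_dev,	/* struct device of the dw_dmac */
		.src_id = 0,		/* hypothetical handshake interfaces */
		.dst_id = 1,
		.m_master = 0,
		.p_master = 1,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, my_dw_filter, &dws);
}
#endif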
1146
1147static void dwc_free_chan_resources(struct dma_chan *chan)
1148{
1149 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1150 struct dw_dma *dw = to_dw_dma(chan->device);
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301151 unsigned long flags;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001152 LIST_HEAD(list);
1153
Andy Shevchenko2e4c3642012-06-19 13:34:05 +03001154 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001155 dwc->descs_allocated);
1156
1157 /* ASSERT: channel is idle */
1158 BUG_ON(!list_empty(&dwc->active_list));
1159 BUG_ON(!list_empty(&dwc->queue));
1160 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1161
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301162 spin_lock_irqsave(&dwc->lock, flags);
Andy Shevchenko3fe64092016-04-08 16:22:17 +03001163
1164 /* Clear custom channel configuration */
Andy Shevchenko9217a5b2016-08-17 19:20:20 +03001165 memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
Andy Shevchenko3fe64092016-04-08 16:22:17 +03001166
Andy Shevchenko423f9cb2016-03-18 16:24:52 +02001167 clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001168
1169 /* Disable interrupts */
1170 channel_clear_bit(dw, MASK.XFER, dwc->mask);
Mans Rullgard2895b2c2016-01-11 13:04:29 +00001171 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001172 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1173
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301174 spin_unlock_irqrestore(&dwc->lock, flags);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001175
Andy Shevchenko99d9bf42014-09-23 17:18:14 +03001176	/* Disable controller in case this was the last user */
1177 dw->in_use &= ~dwc->mask;
1178 if (!dw->in_use)
1179 dw_dma_off(dw);
1180
Andy Shevchenko2e4c3642012-06-19 13:34:05 +03001181 dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001182}
1183
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001184/* --------------------- Cyclic DMA API extensions -------------------- */
1185
1186/**
1187 * dw_dma_cyclic_start - start the cyclic DMA transfer
1188 * @chan: the DMA channel to start
1189 *
1190 * Must be called with soft interrupts disabled. Returns zero on success or
1191 * -errno on failure.
1192 */
1193int dw_dma_cyclic_start(struct dma_chan *chan)
1194{
1195 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Andy Shevchenkoee1cdcd2016-02-10 15:59:42 +02001196 struct dw_dma *dw = to_dw_dma(chan->device);
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301197 unsigned long flags;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001198
1199 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1200 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1201 return -ENODEV;
1202 }
1203
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301204 spin_lock_irqsave(&dwc->lock, flags);
Andy Shevchenkoee1cdcd2016-02-10 15:59:42 +02001205
1206 /* Enable interrupts to perform cyclic transfer */
1207 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1208
Mans Rullgarddf3bb8a2016-01-11 13:04:28 +00001209 dwc_dostart(dwc, dwc->cdesc->desc[0]);
Andy Shevchenkoee1cdcd2016-02-10 15:59:42 +02001210
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301211 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001212
1213 return 0;
1214}
1215EXPORT_SYMBOL(dw_dma_cyclic_start);
1216
1217/**
1218 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1219 * @chan: the DMA channel to stop
1220 *
1221 * Must be called with soft interrupts disabled.
1222 */
1223void dw_dma_cyclic_stop(struct dma_chan *chan)
1224{
1225 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1226 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301227 unsigned long flags;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001228
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301229 spin_lock_irqsave(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001230
Andy Shevchenko3f9362072012-06-19 13:46:32 +03001231 dwc_chan_disable(dw, dwc);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001232
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301233 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001234}
1235EXPORT_SYMBOL(dw_dma_cyclic_stop);
1236
1237/**
1238 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1239 * @chan: the DMA channel to prepare
1240 * @buf_addr: physical DMA address where the buffer starts
1241 * @buf_len: total number of bytes for the entire buffer
1242 * @period_len: number of bytes for each period
1243 * @direction: transfer direction, to or from device
1244 *
1245 * Must be called before trying to start the transfer. Returns a valid struct
1246 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1247 */
1248struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1249 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
Vinod Kouldb8196d2011-10-13 22:34:23 +05301250 enum dma_transfer_direction direction)
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001251{
1252 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
Viresh Kumar327e6972012-02-01 16:12:26 +05301253 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001254 struct dw_cyclic_desc *cdesc;
1255 struct dw_cyclic_desc *retval = NULL;
1256 struct dw_desc *desc;
1257 struct dw_desc *last = NULL;
Andy Shevchenko9217a5b2016-08-17 19:20:20 +03001258 u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001259 unsigned long was_cyclic;
1260 unsigned int reg_width;
1261 unsigned int periods;
1262 unsigned int i;
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301263 unsigned long flags;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001264
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301265 spin_lock_irqsave(&dwc->lock, flags);
Andy Shevchenkofed25742012-09-21 15:05:49 +03001266 if (dwc->nollp) {
1267 spin_unlock_irqrestore(&dwc->lock, flags);
1268 dev_dbg(chan2dev(&dwc->chan),
1269 "channel doesn't support LLP transfers\n");
1270 return ERR_PTR(-EINVAL);
1271 }
1272
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001273 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301274 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001275 dev_dbg(chan2dev(&dwc->chan),
1276 "queue and/or active list are not empty\n");
1277 return ERR_PTR(-EBUSY);
1278 }
1279
1280 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301281 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001282 if (was_cyclic) {
1283 dev_dbg(chan2dev(&dwc->chan),
1284 "channel already prepared for cyclic DMA\n");
1285 return ERR_PTR(-EBUSY);
1286 }
1287
1288 retval = ERR_PTR(-EINVAL);
Viresh Kumar327e6972012-02-01 16:12:26 +05301289
Andy Shevchenkof44b92f2013-01-10 10:52:58 +02001290 if (unlikely(!is_slave_direction(direction)))
1291 goto out_err;
1292
Andy Shevchenko0fdb5672013-01-10 10:53:03 +02001293 dwc->direction = direction;
1294
Viresh Kumar327e6972012-02-01 16:12:26 +05301295 if (direction == DMA_MEM_TO_DEV)
1296 reg_width = __ffs(sconfig->dst_addr_width);
1297 else
1298 reg_width = __ffs(sconfig->src_addr_width);
1299
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001300 periods = buf_len / period_len;
1301
1302 /* Check for too big/unaligned periods and unaligned DMA buffer. */
Andy Shevchenko4a63a8b2012-09-21 15:05:47 +03001303 if (period_len > (dwc->block_size << reg_width))
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001304 goto out_err;
1305 if (unlikely(period_len & ((1 << reg_width) - 1)))
1306 goto out_err;
1307 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1308 goto out_err;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001309
1310 retval = ERR_PTR(-ENOMEM);
1311
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001312 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1313 if (!cdesc)
1314 goto out_err;
1315
1316 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1317 if (!cdesc->desc)
1318 goto out_err_alloc;
1319
1320 for (i = 0; i < periods; i++) {
1321 desc = dwc_desc_get(dwc);
1322 if (!desc)
1323 goto out_err_desc_get;
1324
1325 switch (direction) {
Vinod Kouldb8196d2011-10-13 22:34:23 +05301326 case DMA_MEM_TO_DEV:
Mans Rullgarddf1f3a22016-03-18 16:24:43 +02001327 lli_write(desc, dar, sconfig->dst_addr);
1328 lli_write(desc, sar, buf_addr + period_len * i);
1329 lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
1330 | DWC_CTLL_DST_WIDTH(reg_width)
1331 | DWC_CTLL_SRC_WIDTH(reg_width)
1332 | DWC_CTLL_DST_FIX
1333 | DWC_CTLL_SRC_INC
1334 | DWC_CTLL_INT_EN));
Viresh Kumar327e6972012-02-01 16:12:26 +05301335
Mans Rullgarddf1f3a22016-03-18 16:24:43 +02001336 lli_set(desc, ctllo, sconfig->device_fc ?
1337 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1338 DWC_CTLL_FC(DW_DMA_FC_D_M2P));
Viresh Kumar327e6972012-02-01 16:12:26 +05301339
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001340 break;
Vinod Kouldb8196d2011-10-13 22:34:23 +05301341 case DMA_DEV_TO_MEM:
Mans Rullgarddf1f3a22016-03-18 16:24:43 +02001342 lli_write(desc, dar, buf_addr + period_len * i);
1343 lli_write(desc, sar, sconfig->src_addr);
1344 lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
1345 | DWC_CTLL_SRC_WIDTH(reg_width)
1346 | DWC_CTLL_DST_WIDTH(reg_width)
1347 | DWC_CTLL_DST_INC
1348 | DWC_CTLL_SRC_FIX
1349 | DWC_CTLL_INT_EN));
Viresh Kumar327e6972012-02-01 16:12:26 +05301350
Mans Rullgarddf1f3a22016-03-18 16:24:43 +02001351 lli_set(desc, ctllo, sconfig->device_fc ?
1352 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1353 DWC_CTLL_FC(DW_DMA_FC_D_P2M));
Viresh Kumar327e6972012-02-01 16:12:26 +05301354
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001355 break;
1356 default:
1357 break;
1358 }
1359
Mans Rullgarddf1f3a22016-03-18 16:24:43 +02001360 lli_write(desc, ctlhi, period_len >> reg_width);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001361 cdesc->desc[i] = desc;
1362
Andy Shevchenkof8122a82013-01-16 15:48:50 +02001363 if (last)
Mans Rullgard2a0fae02016-03-18 16:24:44 +02001364 lli_write(last, llp, desc->txd.phys | lms);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001365
1366 last = desc;
1367 }
1368
Andy Shevchenko75c61222013-03-26 16:53:54 +02001369 /* Let's make a cyclic list */
Mans Rullgard2a0fae02016-03-18 16:24:44 +02001370 lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001371
Andy Shevchenko5a87f0e2014-01-13 14:04:50 +02001372 dev_dbg(chan2dev(&dwc->chan),
1373 "cyclic prepared buf %pad len %zu period %zu periods %d\n",
1374 &buf_addr, buf_len, period_len, periods);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001375
1376 cdesc->periods = periods;
1377 dwc->cdesc = cdesc;
1378
1379 return cdesc;
1380
1381out_err_desc_get:
1382 while (i--)
1383 dwc_desc_put(dwc, cdesc->desc[i]);
1384out_err_alloc:
1385 kfree(cdesc);
1386out_err:
1387 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1388 return (struct dw_cyclic_desc *)retval;
1389}
1390EXPORT_SYMBOL(dw_dma_cyclic_prep);
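
/*
 * Illustrative sketch, not part of this driver: the intended call sequence
 * for the cyclic helpers above, e.g. draining a peripheral FIFO into a
 * ring buffer.  The channel, addresses and sizes are hypothetical; the
 * declarations are assumed to live in <linux/dma/dw.h>.  period_len must
 * satisfy the block-size and alignment checks in dw_dma_cyclic_prep().
 */
#if 0	/* example only */
static int my_start_ring(struct dma_chan *chan, dma_addr_t buf,
			 size_t buf_len, size_t period_len,
			 dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,
	};
	struct dw_cyclic_desc *cdesc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* Splits the buffer into buf_len / period_len linked descriptors. */
	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_DEV_TO_MEM);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	return dw_dma_cyclic_start(chan);
}

static void my_stop_ring(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);
}
#endif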
1391
1392/**
1393 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1394 * @chan: the DMA channel to free
1395 */
1396void dw_dma_cyclic_free(struct dma_chan *chan)
1397{
1398 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1399 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1400 struct dw_cyclic_desc *cdesc = dwc->cdesc;
Andy Shevchenko7794e5b2016-03-18 16:24:48 +02001401 unsigned int i;
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301402 unsigned long flags;
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001403
Andy Shevchenko2e4c3642012-06-19 13:34:05 +03001404 dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001405
1406 if (!cdesc)
1407 return;
1408
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301409 spin_lock_irqsave(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001410
Andy Shevchenko3f9362072012-06-19 13:46:32 +03001411 dwc_chan_disable(dw, dwc);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001412
Mans Rullgard2895b2c2016-01-11 13:04:29 +00001413 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001414 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1415 dma_writel(dw, CLEAR.XFER, dwc->mask);
1416
Viresh Kumar69cea5a2011-04-15 16:03:35 +05301417 spin_unlock_irqrestore(&dwc->lock, flags);
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001418
1419 for (i = 0; i < cdesc->periods; i++)
1420 dwc_desc_put(dwc, cdesc->desc[i]);
1421
1422 kfree(cdesc->desc);
1423 kfree(cdesc);
1424
Andy Shevchenko925a7d02016-03-18 16:24:54 +02001425 dwc->cdesc = NULL;
1426
Hans-Christian Egtvedtd9de4512009-04-01 15:47:02 +02001427 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1428}
1429EXPORT_SYMBOL(dw_dma_cyclic_free);
1430
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001431/*----------------------------------------------------------------------*/
1432
Andy Shevchenko3a14c662016-04-27 14:15:40 +03001433int dw_dma_probe(struct dw_dma_chip *chip)
Viresh Kumara9ddb572012-10-16 09:49:17 +05301434{
Andy Shevchenko3a14c662016-04-27 14:15:40 +03001435 struct dw_dma_platform_data *pdata;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001436 struct dw_dma *dw;
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001437 bool autocfg = false;
Andy Shevchenko482c67e2012-09-21 15:05:46 +03001438 unsigned int dw_params;
Andy Shevchenko7794e5b2016-03-18 16:24:48 +02001439 unsigned int i;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001440 int err;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001441
Andy Shevchenko000871c2014-03-05 15:48:12 +02001442 dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
1443 if (!dw)
1444 return -ENOMEM;
1445
Andy Shevchenko161c3d02016-04-27 14:15:39 +03001446 dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
1447 if (!dw->pdata)
1448 return -ENOMEM;
1449
Andy Shevchenko000871c2014-03-05 15:48:12 +02001450 dw->regs = chip->regs;
1451 chip->dw = dw;
1452
Andy Shevchenkobb32baf2014-11-05 18:34:48 +02001453 pm_runtime_get_sync(chip->dev);
1454
Andy Shevchenko3a14c662016-04-27 14:15:40 +03001455 if (!chip->pdata) {
Andy Shevchenko897e40d2016-03-18 16:24:46 +02001456 dw_params = dma_readl(dw, DW_PARAMS);
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001457 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
Andy Shevchenko482c67e2012-09-21 15:05:46 +03001458
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001459 autocfg = dw_params >> DW_PARAMS_EN & 1;
1460 if (!autocfg) {
1461 err = -EINVAL;
1462 goto err_pdata;
1463 }
Andy Shevchenko123de542013-01-09 10:17:01 +02001464
Andy Shevchenko161c3d02016-04-27 14:15:39 +03001465 /* Reassign the platform data pointer */
1466 pdata = dw->pdata;
Andy Shevchenko123de542013-01-09 10:17:01 +02001467
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001468 /* Get hardware configuration parameters */
1469 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1470 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1471 for (i = 0; i < pdata->nr_masters; i++) {
1472 pdata->data_width[i] =
Andy Shevchenko2e650602016-04-27 14:15:38 +03001473 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001474 }
Andy Shevchenko161c3d02016-04-27 14:15:39 +03001475 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001476
Andy Shevchenko123de542013-01-09 10:17:01 +02001477 /* Fill platform data with the default values */
1478 pdata->is_private = true;
Andy Shevchenkodf5c7382015-10-13 20:09:19 +03001479 pdata->is_memcpy = true;
Andy Shevchenko123de542013-01-09 10:17:01 +02001480 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1481 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
Andy Shevchenko3a14c662016-04-27 14:15:40 +03001482 } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
Andy Shevchenko8be4f522014-05-08 12:01:49 +03001483 err = -EINVAL;
1484 goto err_pdata;
Andy Shevchenko161c3d02016-04-27 14:15:39 +03001485 } else {
Andy Shevchenko3a14c662016-04-27 14:15:40 +03001486 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
Andy Shevchenko161c3d02016-04-27 14:15:39 +03001487
1488 /* Reassign the platform data pointer */
1489 pdata = dw->pdata;
Andy Shevchenko8be4f522014-05-08 12:01:49 +03001490 }
Andy Shevchenko123de542013-01-09 10:17:01 +02001491
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001492 dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
Andy Shevchenko000871c2014-03-05 15:48:12 +02001493 GFP_KERNEL);
Andy Shevchenko8be4f522014-05-08 12:01:49 +03001494 if (!dw->chan) {
1495 err = -ENOMEM;
1496 goto err_pdata;
1497 }
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001498
Andy Shevchenko11f932e2012-06-19 13:34:06 +03001499 /* Calculate all channel mask before DMA setup */
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001500 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
Andy Shevchenko11f932e2012-06-19 13:34:06 +03001501
Andy Shevchenko75c61222013-03-26 16:53:54 +02001502 /* Force dma off, just in case */
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001503 dw_dma_off(dw);
1504
Andy Shevchenko75c61222013-03-26 16:53:54 +02001505 /* Create a pool of consistent memory blocks for hardware descriptors */
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001506 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
Andy Shevchenkof8122a82013-01-16 15:48:50 +02001507 sizeof(struct dw_desc), 4, 0);
1508 if (!dw->desc_pool) {
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001509		dev_err(chip->dev, "No memory for descriptor DMA pool\n");
Andy Shevchenko8be4f522014-05-08 12:01:49 +03001510 err = -ENOMEM;
1511 goto err_pdata;
Andy Shevchenkof8122a82013-01-16 15:48:50 +02001512 }
1513
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001514 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1515
Andy Shevchenko97977f72014-05-07 10:56:24 +03001516 err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1517 "dw_dmac", dw);
1518 if (err)
Andy Shevchenko8be4f522014-05-08 12:01:49 +03001519 goto err_pdata;
Andy Shevchenko97977f72014-05-07 10:56:24 +03001520
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001521 INIT_LIST_HEAD(&dw->dma.channels);
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001522 for (i = 0; i < pdata->nr_channels; i++) {
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001523 struct dw_dma_chan *dwc = &dw->chan[i];
1524
1525 dwc->chan.device = &dw->dma;
Russell King - ARM Linuxd3ee98cdc2012-03-06 22:35:47 +00001526 dma_cookie_init(&dwc->chan);
Viresh Kumarb0c31302011-03-03 15:47:21 +05301527 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1528 list_add_tail(&dwc->chan.device_node,
1529 &dw->dma.channels);
1530 else
1531 list_add(&dwc->chan.device_node, &dw->dma.channels);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001532
Viresh Kumar93317e82011-03-03 15:47:22 +05301533 /* 7 is highest priority & 0 is lowest. */
1534 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001535 dwc->priority = pdata->nr_channels - i - 1;
Viresh Kumar93317e82011-03-03 15:47:22 +05301536 else
1537 dwc->priority = i;
1538
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001539 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1540 spin_lock_init(&dwc->lock);
1541 dwc->mask = 1 << i;
1542
1543 INIT_LIST_HEAD(&dwc->active_list);
1544 INIT_LIST_HEAD(&dwc->queue);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001545
1546 channel_clear_bit(dw, CH_EN, dwc->mask);
Andy Shevchenko4a63a8b2012-09-21 15:05:47 +03001547
Andy Shevchenko0fdb5672013-01-10 10:53:03 +02001548 dwc->direction = DMA_TRANS_NONE;
Andy Shevchenkoa0982002012-09-21 15:05:48 +03001549
Andy Shevchenko75c61222013-03-26 16:53:54 +02001550 /* Hardware configuration */
Andy Shevchenkofed25742012-09-21 15:05:49 +03001551 if (autocfg) {
Andy Shevchenko6bea0f62015-09-28 18:57:03 +03001552 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
Andy Shevchenko897e40d2016-03-18 16:24:46 +02001553 void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
1554 unsigned int dwc_params = dma_readl_native(addr);
Andy Shevchenkofed25742012-09-21 15:05:49 +03001555
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001556 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1557 dwc_params);
Andy Shevchenko985a6c72013-01-18 17:10:59 +02001558
Andy Shevchenko1d566f12014-01-13 14:04:48 +02001559 /*
1560 * Decode maximum block size for given channel. The
Andy Shevchenko4a63a8b2012-09-21 15:05:47 +03001561 * stored 4 bit value represents blocks from 0x00 for 3
Andy Shevchenko1d566f12014-01-13 14:04:48 +02001562 * up to 0x0a for 4095.
1563 */
Andy Shevchenko4a63a8b2012-09-21 15:05:47 +03001564 dwc->block_size =
Andy Shevchenko161c3d02016-04-27 14:15:39 +03001565 (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
Andy Shevchenkofed25742012-09-21 15:05:49 +03001566 dwc->nollp =
1567 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1568 } else {
Andy Shevchenko4a63a8b2012-09-21 15:05:47 +03001569 dwc->block_size = pdata->block_size;
Andy Shevchenkofed25742012-09-21 15:05:49 +03001570
1571 /* Check if channel supports multi block transfer */
Mans Rullgard2a0fae02016-03-18 16:24:44 +02001572 channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff));
1573 dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0;
Andy Shevchenkofed25742012-09-21 15:05:49 +03001574 channel_writel(dwc, LLP, 0);
1575 }
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001576 }
1577
Andy Shevchenko11f932e2012-06-19 13:34:06 +03001578 /* Clear all interrupts on all channels. */
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001579 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
Andy Shevchenko236b1062012-06-19 13:34:07 +03001580 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001581 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1582 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1583 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1584
Andy Shevchenkodf5c7382015-10-13 20:09:19 +03001585 /* Set capabilities */
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001586 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
Jamie Iles95ea7592011-01-21 14:11:54 +00001587 if (pdata->is_private)
1588 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
Andy Shevchenkodf5c7382015-10-13 20:09:19 +03001589 if (pdata->is_memcpy)
1590 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1591
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001592 dw->dma.dev = chip->dev;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001593 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1594 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1595
1596 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001597 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
Andy Shevchenko029a40e2015-01-02 16:17:24 +02001598
Maxime Riparda4b0d342014-11-17 14:42:12 +01001599 dw->dma.device_config = dwc_config;
1600 dw->dma.device_pause = dwc_pause;
1601 dw->dma.device_resume = dwc_resume;
1602 dw->dma.device_terminate_all = dwc_terminate_all;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001603
Linus Walleij07934482010-03-26 16:50:49 -07001604 dw->dma.device_tx_status = dwc_tx_status;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001605 dw->dma.device_issue_pending = dwc_issue_pending;
1606
Andy Shevchenko029a40e2015-01-02 16:17:24 +02001607 /* DMA capabilities */
1608 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1609 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1610 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1611 BIT(DMA_MEM_TO_MEM);
1612 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1613
Andy Shevchenko12229342014-05-08 12:01:50 +03001614 err = dma_async_device_register(&dw->dma);
1615 if (err)
1616 goto err_dma_register;
1617
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001618 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
Andy Shevchenko30cb2632015-10-13 20:09:17 +03001619 pdata->nr_channels);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001620
Andy Shevchenkobb32baf2014-11-05 18:34:48 +02001621 pm_runtime_put_sync_suspend(chip->dev);
1622
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001623 return 0;
Andy Shevchenko8be4f522014-05-08 12:01:49 +03001624
Andy Shevchenko12229342014-05-08 12:01:50 +03001625err_dma_register:
1626 free_irq(chip->irq, dw);
Andy Shevchenko8be4f522014-05-08 12:01:49 +03001627err_pdata:
Andy Shevchenkobb32baf2014-11-05 18:34:48 +02001628 pm_runtime_put_sync_suspend(chip->dev);
Andy Shevchenko8be4f522014-05-08 12:01:49 +03001629 return err;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001630}
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001631EXPORT_SYMBOL_GPL(dw_dma_probe);
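
/*
 * Illustrative sketch, not part of this driver: roughly what a platform or
 * PCI glue driver does with dw_dma_probe()/dw_dma_remove().  Resource
 * lookup is elided and the names are hypothetical; struct dw_dma_chip is
 * assumed to come from <linux/dma/dw.h>.  Leaving chip->pdata NULL lets
 * the core autodetect its parameters from DW_PARAMS, as in the branch above.
 */
#if 0	/* example only */
static int my_glue_probe(struct device *dev, void __iomem *regs, int irq)
{
	struct dw_dma_chip *chip;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->regs = regs;
	chip->irq = irq;
	chip->pdata = NULL;	/* use hardware autoconfiguration */

	ret = dw_dma_probe(chip);
	if (ret)
		return ret;

	dev_set_drvdata(dev, chip);
	return 0;
}

static int my_glue_remove(struct device *dev)
{
	return dw_dma_remove(dev_get_drvdata(dev));
}
#endif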
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001632
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001633int dw_dma_remove(struct dw_dma_chip *chip)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001634{
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001635 struct dw_dma *dw = chip->dw;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001636 struct dw_dma_chan *dwc, *_dwc;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001637
Andy Shevchenkobb32baf2014-11-05 18:34:48 +02001638 pm_runtime_get_sync(chip->dev);
1639
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001640 dw_dma_off(dw);
1641 dma_async_device_unregister(&dw->dma);
1642
Andy Shevchenko97977f72014-05-07 10:56:24 +03001643 free_irq(chip->irq, dw);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001644 tasklet_kill(&dw->tasklet);
1645
1646 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1647 chan.device_node) {
1648 list_del(&dwc->chan.device_node);
1649 channel_clear_bit(dw, CH_EN, dwc->mask);
1650 }
1651
Andy Shevchenkobb32baf2014-11-05 18:34:48 +02001652 pm_runtime_put_sync_suspend(chip->dev);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001653 return 0;
1654}
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001655EXPORT_SYMBOL_GPL(dw_dma_remove);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001656
Andy Shevchenko2540f742014-09-23 17:18:13 +03001657int dw_dma_disable(struct dw_dma_chip *chip)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001658{
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001659 struct dw_dma *dw = chip->dw;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001660
Andy Shevchenko6168d562012-10-18 17:34:10 +03001661 dw_dma_off(dw);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001662 return 0;
1663}
Andy Shevchenko2540f742014-09-23 17:18:13 +03001664EXPORT_SYMBOL_GPL(dw_dma_disable);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001665
Andy Shevchenko2540f742014-09-23 17:18:13 +03001666int dw_dma_enable(struct dw_dma_chip *chip)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001667{
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001668 struct dw_dma *dw = chip->dw;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001669
Andy Shevchenko7a83c042014-09-23 17:18:12 +03001670 dw_dma_on(dw);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001671 return 0;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001672}
Andy Shevchenko2540f742014-09-23 17:18:13 +03001673EXPORT_SYMBOL_GPL(dw_dma_enable);
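
/*
 * Illustrative sketch, not part of this driver: how glue drivers are
 * expected to use dw_dma_disable()/dw_dma_enable() from their PM hooks.
 * Clock handling is omitted and the drvdata layout is hypothetical.
 */
#if 0	/* example only */
static int my_glue_suspend(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	return dw_dma_disable(chip);
}

static int my_glue_resume(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	return dw_dma_enable(chip);
}
#endif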
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001674
1675MODULE_LICENSE("GPL v2");
Andy Shevchenko9cade1a2013-06-05 15:26:45 +03001676MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
Jean Delvaree05503e2011-05-18 16:49:24 +02001677MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
Viresh Kumarda899472015-07-17 16:23:50 -07001678MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");