/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
#define AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)		/* Number of Channels Minus One */
#define AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)		/* Number of Bytes */
#define AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
#define AT_XDMAC_GRS		0x28	/* Global Channel Read Suspend Register */
#define AT_XDMAC_GWS		0x2C	/* Global Channel Write Suspend Register */
#define AT_XDMAC_GRWS		0x30	/* Global Channel Read Write Suspend Register */
#define AT_XDMAC_GRWR		0x34	/* Global Channel Read Write Resume Register */
#define AT_XDMAC_GSWR		0x38	/* Global Channel Software Request Register */
#define AT_XDMAC_GSWS		0x3C	/* Global Channel Software Request Status Register */
#define AT_XDMAC_GSWF		0x40	/* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
#define AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
#define AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
#define AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
#define AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
#define AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
#define AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
#define AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
#define AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
#define AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
#define AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
#define AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
#define AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
#define AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
#define AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
#define AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
#define AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
#define AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
#define AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
#define AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
#define AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
#define AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)		/* Channel x Next Descriptor Interface */
#define AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)	/* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
#define AT_XDMAC_CNDC_NDE		(0x1 << 0)	/* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP		(0x1 << 1)	/* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP		(0x1 << 2)	/* Channel x Next Descriptor Destination Update */
#define AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)	/* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)	/* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)	/* Channel x Next Descriptor View 2 */
#define AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)	/* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
#define AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
#define AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
#define AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
#define AT_XDMAC_CC_MBSIZE_MASK		(0x3 << 1)
#define AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
#define AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
#define AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
#define AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
#define AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
#define AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
#define AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
#define AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
#define AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
#define AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
#define AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
#define AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
#define AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
#define AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
#define AT_XDMAC_CC_DWIDTH_OFFSET	11
#define AT_XDMAC_CC_DWIDTH_MASK		(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
#define AT_XDMAC_CC_DWIDTH_BYTE		0x0
#define AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
#define AT_XDMAC_CC_DWIDTH_WORD		0x2
#define AT_XDMAC_CC_DWIDTH_DWORD	0x3
#define AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
#define AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
#define AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
#define AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
#define AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
#define AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
#define AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
#define AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
#define AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
#define AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
#define AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
#define AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
#define AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
#define AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
#define AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
#define AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
#define AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
#define AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
#define AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier; parenthesized so the mask applies before the shift */
#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */

#define AT_XDMAC_CHAN_REG_BASE	0x50	/* Channel registers base address */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN	0x20
#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES	5

#define AT_XDMAC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan			chan;
	void __iomem			*ch_regs;
	u32				mask;	/* Channel Mask */
	u32				cfg;	/* Channel Configuration Register */
	u8				perid;	/* Peripheral ID */
	u8				perif;	/* Peripheral Interface */
	u8				memif;	/* Memory Interface */
	u32				save_cc;
	u32				save_cim;
	u32				save_cnda;
	u32				save_cndc;
	u32				irq_status;
	unsigned long			status;
	struct tasklet_struct		tasklet;
	struct dma_slave_config		sconfig;

	spinlock_t			lock;

	struct list_head		xfers_list;
	struct list_head		free_descs_list;
};


/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device	dma;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	u32			save_gim;
	u32			save_gs;
	struct dma_pool		*at_xdmac_desc_pool;
	struct at_xdmac_chan	chan[0];
};


/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	dma_addr_t	mbr_nda;	/* Next Descriptor Member */
	u32		mbr_ubc;	/* Microblock Control Member */
	dma_addr_t	mbr_sa;		/* Source Address Member */
	dma_addr_t	mbr_da;		/* Destination Address Member */
	u32		mbr_cfg;	/* Configuration Register */
	u32		mbr_bc;		/* Block Control Register */
	u32		mbr_ds;		/* Data Stride Register */
	u32		mbr_sus;	/* Source Microblock Stride Register */
	u32		mbr_dus;	/* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
	struct at_xdmac_lld		lld;
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;
	unsigned int			xfer_size;
	struct list_head		descs_list;
	struct list_head		xfer_node;
} __aligned(sizeof(u64));

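/*
 * Per-channel registers live in a window starting at offset 0x50 from the
 * controller base, one 0x40-byte slot per channel; the channel offsets
 * defined above (0x00-0x34) are relative to that slot.
 */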
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
}

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

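/*
 * Convert a maxburst value (number of data items) into the CSIZE field
 * encoding: chunk sizes are powers of two, so CSIZE is ffs(maxburst) - 1,
 * and anything above 16 data (csize > 4) is rejected.
 */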
static inline int at_xdmac_csize(u32 maxburst)
{
	int csize;

	csize = ffs(maxburst) - 1;
	if (csize > 4)
		csize = -EINVAL;

	return csize;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
}

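/*
 * Quiesce the whole controller: request disable of every channel, spin until
 * the Global Channel Status register reports them all idle, then mask every
 * interrupt source.
 */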
static void at_xdmac_off(struct at_xdmac *atxdmac)
{
	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}

/* Must be called with the channel lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	u32 reg;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	if (at_xdmac_chan_is_enabled(atchan))
		return;

	/* Set transfer as active to not try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
	      | AT_XDMAC_CNDA_NDAIF(atchan->memif);
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing non-cyclic transfers we need to use descriptor view 2
	 * (or higher), since some fields of the configuration register
	 * depend on the transfer size and the src/dest addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
	else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	else
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
	/*
	 * Even if the register will be updated from the configuration in the
	 * descriptor when using view 2 or higher, the PROT bit won't be set
	 * properly. This bit can be modified only by using the channel
	 * configuration register.
	 */
	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

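/*
 * dmaengine tx_submit hook: assign the cookie under the channel lock and
 * queue the descriptor. If it is the only element on xfers_list, the
 * hardware is kicked immediately; otherwise it will be started once the
 * transfers queued ahead of it have run.
 */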
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc *desc = txd_to_at_desc(tx);
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long irqflags;

	spin_lock_irqsave(&atchan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);
	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	if (list_is_singular(&atchan->xfers_list))
		at_xdmac_start_xfer(atchan, desc);

	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc *desc;
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(*desc));
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
	memset(&desc->lld, 0, sizeof(desc->lld));
	INIT_LIST_HEAD(&desc->descs_list);
	desc->direction = DMA_TRANS_NONE;
	desc->xfer_size = 0;
	desc->active_xfer = false;
}

/* Caller must hold the channel lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		at_xdmac_init_used_desc(desc);
	}

	return desc;
}

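/*
 * Chain two hardware descriptors: point prev's next-descriptor member at
 * desc's physical address and set NDE so the controller fetches desc when
 * prev's microblock completes.
 */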
static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;

	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac *atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan;
	struct device *dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		atchan->memif, atchan->perif, atchan->perid);

	return chan;
}

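/*
 * Build the Channel Configuration register value for a slave transfer from
 * the cached dma_slave_config: addressing modes, interfaces, PERID, chunk
 * size (CSIZE) and data width (DWIDTH) all depend on the direction.
 */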
static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
				      enum dma_transfer_direction direction)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	int csize, dwidth;

	if (direction == DMA_DEV_TO_MEM) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_INCREMENTED_AM
			| AT_XDMAC_CC_SAM_FIXED_AM
			| AT_XDMAC_CC_DIF(atchan->memif)
			| AT_XDMAC_CC_SIF(atchan->perif)
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_PER2MEM
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		csize = ffs(atchan->sconfig.src_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid src addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	} else if (direction == DMA_MEM_TO_DEV) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_FIXED_AM
			| AT_XDMAC_CC_SAM_INCREMENTED_AM
			| AT_XDMAC_CC_DIF(atchan->perif)
			| AT_XDMAC_CC_SIF(atchan->memif)
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_MEM2PER
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid dst maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid dst addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	}

	dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

	return 0;
}

/*
 * Only check that the maxburst and address width values are supported by
 * the controller; don't check that the configuration is valid for the
 * transfer, since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
				     struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	if (at_xdmac_check_slave_config(sconfig)) {
		dev_err(chan2dev(chan), "invalid slave configuration\n");
		return -EINVAL;
	}

	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

	return 0;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	int i;
	unsigned int xfer_size = 0;
	unsigned long irqflags;
	struct dma_async_tx_descriptor *ret = NULL;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		__func__, sg_len,
		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		flags);

	/* Protect the sconfig field, which can be modified by at_xdmac_set_slave_config(). */
	spin_lock_irqsave(&atchan->lock, irqflags);

	if (at_xdmac_compute_chan_conf(chan, direction))
		goto spin_unlock;

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc *desc = NULL;
		u32 len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			goto spin_unlock;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			__func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			goto spin_unlock;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = mem;
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		dwidth = at_xdmac_get_dwidth(atchan->cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? dwidth
			       : AT_XDMAC_CC_DWIDTH_BYTE;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
			| (len >> fixed_dwidth);		/* microblock length */
		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
		xfer_size += len;
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;
	first->direction = direction;
	ret = &first->tx_dma_desc;

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return ret;
}

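/*
 * Cyclic (audio-style) transfers: one descriptor per period, all chained,
 * with the last one looped back onto the first via at_xdmac_queue_desc() so
 * the controller keeps circling the list until the channel is stopped.
 */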
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	unsigned int periods = buf_len / period_len;
	int i;
	unsigned long irqflags;

	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
		__func__, &buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	if (at_xdmac_compute_chan_conf(chan, direction))
		return NULL;

	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc *desc = NULL;

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			spin_unlock_irqrestore(&atchan->lock, irqflags);
			return NULL;
		}
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
			__func__, desc, &desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		desc->lld.mbr_cfg = atchan->cfg;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	at_xdmac_queue_desc(chan, prev, first);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}

static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
	u32 width;

	/*
	 * Check address alignment to select the greatest data width we
	 * can use.
	 *
	 * Some XDMAC implementations don't provide dword transfer; in
	 * this case selecting dword has the same behavior as
	 * selecting word transfers.
	 */
	if (!(addr & 7)) {
		width = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!(addr & 3)) {
		width = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!(addr & 1)) {
		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		width = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	return width;
}

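/*
 * Build one linked-list descriptor (view 3, so the microblock strides are
 * part of the descriptor itself) for a single chunk of an interleaved
 * template.
 */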
static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
				struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *prev,
				dma_addr_t src, dma_addr_t dst,
				struct dma_interleaved_template *xt,
				struct data_chunk *chunk)
{
	struct at_xdmac_desc *desc;
	u32 dwidth;
	unsigned long flags;
	size_t ublen;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interface, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which solves the fact we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
		| AT_XDMAC_CC_DIF(0)
		| AT_XDMAC_CC_SIF(0)
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_dbg(chan2dev(chan),
			"%s: chunk too big (%zu, max size %lu)...\n",
			__func__, chunk->size,
			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
		return NULL;
	}

	if (prev)
		dev_dbg(chan2dev(chan),
			"Adding items at the end of desc 0x%p\n", prev);

	if (xt->src_inc) {
		if (xt->src_sgl)
			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
	}

	if (xt->dst_inc) {
		if (xt->dst_sgl)
			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = chunk->size >> dwidth;

	desc->lld.mbr_sa = src;
	desc->lld.mbr_da = dst;
	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
	if (prev)
		at_xdmac_queue_desc(chan, prev, desc);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *prev = NULL, *first = NULL;
	dma_addr_t dst_addr, src_addr;
	size_t src_skip = 0, dst_skip = 0, len = 0;
	struct data_chunk *chunk;
	int i;

	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
		return NULL;

	/*
	 * TODO: Handle the case where we have to repeat a chain of
	 * descriptors...
	 */
	if ((xt->numf > 1) && (xt->frame_size > 1))
		return NULL;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
	dst_addr = xt->dst_start;

	if (xt->numf > 1) {
		first = at_xdmac_interleaved_queue_desc(chan, atchan,
							NULL,
							src_addr, dst_addr,
							xt, xt->sgl);

		/* Length of the block is (BLEN+1) microblocks. */
		for (i = 0; i < xt->numf - 1; i++)
			at_xdmac_increment_block_count(chan, first);

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, first, first);
		list_add_tail(&first->desc_node, &first->descs_list);
	} else {
		for (i = 0; i < xt->frame_size; i++) {
			size_t src_icg = 0, dst_icg = 0;
			struct at_xdmac_desc *desc;

			chunk = xt->sgl + i;

			dst_icg = dmaengine_get_dst_icg(xt, chunk);
			src_icg = dmaengine_get_src_icg(xt, chunk);

			src_skip = chunk->size + src_icg;
			dst_skip = chunk->size + dst_icg;

			dev_dbg(chan2dev(chan),
				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
				__func__, chunk->size, src_icg, dst_icg);

			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
							       prev,
							       src_addr, dst_addr,
							       xt, chunk);
			if (!desc) {
				if (first)
					list_splice_init(&first->descs_list,
							 &atchan->free_descs_list);
				return NULL;
			}

			if (!first)
				first = desc;

			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
				__func__, desc, first);
			list_add_tail(&desc->desc_node, &first->descs_list);

			if (xt->src_sgl)
				src_addr += src_skip;

			if (xt->dst_sgl)
				dst_addr += dst_skip;

			len += chunk->size;
			prev = desc;
		}
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

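/*
 * Memory-to-memory copy: the length is split into a chain of microblocks of
 * at most AT_XDMAC_MBR_UBC_UBLEN_MAX data each, re-evaluating the best data
 * width for every piece from the current address alignment.
 */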
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	size_t remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t src_addr = src, dst_addr = dest;
	u32 dwidth;
	/*
	 * WARNING: We don't know the direction, which means we can't
	 * dynamically set the source and dest interface, so we have to use
	 * the same one. Only interface 0 allows EBI access. Hopefully we can
	 * access DDR through both ports (at least on SAMA5D4x), so we can use
	 * the same interface for source and dest, which solves the fact we
	 * don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
		| AT_XDMAC_CC_DAM_INCREMENTED_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_DIF(0)
		| AT_XDMAC_CC_SIF(0)
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;
	unsigned long irqflags;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
		__func__, &src, &dest, len, flags);

	if (unlikely(!len))
		return NULL;

	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc *desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

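/*
 * Build a single memset descriptor: the fill pattern goes in the Data Stride
 * Memory Set Pattern member (mbr_ds) and the channel runs in hardware memset
 * mode (AT_XDMAC_CC_MEMSET_HW_MODE).
 */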
static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
							 struct at_xdmac_chan *atchan,
							 dma_addr_t dst_addr,
							 size_t len,
							 int value)
{
	struct at_xdmac_desc *desc;
	unsigned long flags;
	size_t ublen;
	u32 dwidth;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interface, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which solves the fact we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
		| AT_XDMAC_CC_DAM_UBS_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_DIF(0)
		| AT_XDMAC_CC_SIF(0)
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_MEMSET_HW_MODE
		| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, dst_addr);

	if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_err(chan2dev(chan),
			"%s: Transfer too large, aborting...\n",
			__func__);
		return NULL;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = len >> dwidth;

	desc->lld.mbr_da = dst_addr;
	desc->lld.mbr_ds = value;
	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
		desc->lld.mbr_cfg);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc;

	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
		__func__, &dest, len, value, flags);

	if (unlikely(!len))
		return NULL;

	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
	if (!desc)
		return NULL;
	list_add_tail(&desc->desc_node, &desc->descs_list);

	desc->tx_dma_desc.cookie = -EBUSY;
	desc->tx_dma_desc.flags = flags;
	desc->xfer_size = len;

	return &desc->tx_dma_desc;
}

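/*
 * Scatter-gather memset: walk the list keeping a three-element window
 * (N-2, N-1, N) so that runs with a constant stride and length can be
 * coalesced into one descriptor with an incremented block count instead
 * of a longer descriptor chain.
 */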
Maxime Ripard67a6eed2015-07-06 12:19:24 +02001229static struct dma_async_tx_descriptor *
1230at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1231 unsigned int sg_len, int value,
1232 unsigned long flags)
1233{
1234 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1235 struct at_xdmac_desc *desc, *pdesc = NULL,
1236 *ppdesc = NULL, *first = NULL;
1237 struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
1238 size_t stride = 0, pstride = 0, len = 0;
1239 int i;
1240
1241 if (!sgl)
1242 return NULL;
1243
1244 dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
1245 __func__, sg_len, value, flags);
1246
1247 /* Prepare descriptors. */
1248 for_each_sg(sgl, sg, sg_len, i) {
Arnd Bergmann268914f2015-11-12 15:16:53 +01001249 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1250 __func__, &sg_dma_address(sg), sg_dma_len(sg),
Maxime Ripard67a6eed2015-07-06 12:19:24 +02001251 value, flags);
1252 desc = at_xdmac_memset_create_desc(chan, atchan,
1253 sg_dma_address(sg),
1254 sg_dma_len(sg),
1255 value);
1256 if (!desc && first)
1257 list_splice_init(&first->descs_list,
1258 &atchan->free_descs_list);
1259
1260 if (!first)
1261 first = desc;
1262
1263 /* Update our strides */
1264 pstride = stride;
1265 if (psg)
1266 stride = sg_dma_address(sg) -
1267 (sg_dma_address(psg) + sg_dma_len(psg));
1268
1269 /*
1270 * The scatterlist API gives us only the address and
1271 * length of each elements.
1272 *
1273 * Unfortunately, we don't have the stride, which we
1274 * will need to compute.
1275 *
1276 * That make us end up in a situation like this one:
1277 * len stride len stride len
1278 * +-------+ +-------+ +-------+
1279 * | N-2 | | N-1 | | N |
1280 * +-------+ +-------+ +-------+
1281 *
1282 * We need all these three elements (N-2, N-1 and N)
1283 * to actually take the decision on whether we need to
1284 * queue N-1 or reuse N-2.
1285 *
1286 * We will only consider N if it is the last element.
1287 */
1288 if (ppdesc && pdesc) {
1289 if ((stride == pstride) &&
1290 (sg_dma_len(ppsg) == sg_dma_len(psg))) {
1291 dev_dbg(chan2dev(chan),
1292 "%s: desc 0x%p can be merged with desc 0x%p\n",
1293 __func__, pdesc, ppdesc);
1294
1295 /*
1296 * Increment the block count of the
1297 * N-2 descriptor
1298 */
1299 at_xdmac_increment_block_count(chan, ppdesc);
1300 ppdesc->lld.mbr_dus = stride;
1301
1302 /*
1303 * Put back the N-1 descriptor in the
1304 * free descriptor list
1305 */
1306 list_add_tail(&pdesc->desc_node,
1307 &atchan->free_descs_list);
1308
1309 /*
1310 * Make our N-1 descriptor pointer
1311 * point to the N-2 since they were
1312 * actually merged.
1313 */
1314 pdesc = ppdesc;
1315
1316 /*
1317 * Rule out the case where we don't have
1318 * pstride computed yet (our second sg
1319 * element)
1320 *
1321 * We also want to catch the case where there
1322 * would be a negative stride,
1323 */
1324 } else if (pstride ||
1325 sg_dma_address(sg) < sg_dma_address(psg)) {
1326 /*
1327 * Queue the N-1 descriptor after the
1328 * N-2
1329 */
1330 at_xdmac_queue_desc(chan, ppdesc, pdesc);
1331
1332 /*
1333 * Add the N-1 descriptor to the list
1334 * of the descriptors used for this
1335 * transfer
1336 */
1337 list_add_tail(&desc->desc_node,
1338 &first->descs_list);
1339 dev_dbg(chan2dev(chan),
1340 "%s: add desc 0x%p to descs_list 0x%p\n",
1341 __func__, desc, first);
1342 }
1343 }
1344
1345 /*
		 * If this is the last element, just check whether it has
		 * the same size as the previous element.
1348 *
1349 * If so, we can merge it with the previous descriptor
1350 * since we don't care about the stride anymore.
1351 */
1352 if ((i == (sg_len - 1)) &&
Ludovic Desrochesf5a00eb2015-11-24 10:51:09 +01001353 sg_dma_len(psg) == sg_dma_len(sg)) {
Maxime Ripard67a6eed2015-07-06 12:19:24 +02001354 dev_dbg(chan2dev(chan),
1355 "%s: desc 0x%p can be merged with desc 0x%p\n",
1356 __func__, desc, pdesc);
1357
1358 /*
1359 * Increment the block count of the N-1
1360 * descriptor
1361 */
1362 at_xdmac_increment_block_count(chan, pdesc);
1363 pdesc->lld.mbr_dus = stride;
1364
1365 /*
1366 * Put back the N descriptor in the free
1367 * descriptor list
1368 */
1369 list_add_tail(&desc->desc_node,
1370 &atchan->free_descs_list);
1371 }
1372
1373 /* Update our descriptors */
1374 ppdesc = pdesc;
1375 pdesc = desc;
1376
1377 /* Update our scatter pointers */
1378 ppsg = psg;
1379 psg = sg;
1380
1381 len += sg_dma_len(sg);
1382 }
1383
1384 first->tx_dma_desc.cookie = -EBUSY;
1385 first->tx_dma_desc.flags = flags;
1386 first->xfer_size = len;
1387
1388 return &first->tx_dma_desc;
1389}
1390
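/*
 * Consumer-side sketch (not part of this driver, compiled out): the
 * dmaengine core provides no dmaengine_prep_dma_memset_sg() wrapper, so a
 * client would check the capability and call the device op directly.
 * example_memset_sg() is a hypothetical name and error handling is reduced
 * to the minimum.
 */
#if 0
static int example_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len, int value)
{
	struct dma_async_tx_descriptor *tx;

	if (!dma_has_cap(DMA_MEMSET_SG, chan->device->cap_mask))
		return -EOPNOTSUPP;

	tx = chan->device->device_prep_dma_memset_sg(chan, sgl, sg_len, value,
						     DMA_PREP_INTERRUPT);
	if (!tx)
		return -EINVAL;

	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* kick the channel */
	return 0;
}
#endif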
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001391static enum dma_status
1392at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1393 struct dma_tx_state *txstate)
1394{
1395 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1396 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1397 struct at_xdmac_desc *desc, *_desc;
1398 struct list_head *descs_list;
1399 enum dma_status ret;
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001400 int residue, retry;
1401 u32 cur_nda, check_nda, cur_ubc, mask, value;
Ludovic Desrochesbe835072015-01-27 16:30:31 +01001402 u8 dwidth = 0;
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001403 unsigned long flags;
Ludovic Desroches53398f42016-05-12 16:54:09 +02001404 bool initd;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001405
1406 ret = dma_cookie_status(chan, cookie, txstate);
1407 if (ret == DMA_COMPLETE)
1408 return ret;
1409
1410 if (!txstate)
1411 return ret;
1412
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001413 spin_lock_irqsave(&atchan->lock, flags);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001414
1415 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1416
1417 /*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the transfer length.
1420 */
1421 if (!desc->active_xfer) {
1422 dma_set_residue(txstate, desc->xfer_size);
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001423 goto spin_unlock;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001424 }
1425
1426 residue = desc->xfer_size;
Cyrille Pitchen4e097822014-11-13 11:52:41 +01001427 /*
1428 * Flush FIFO: only relevant when the transfer is source peripheral
Ludovic Desroches9295c412016-05-12 16:54:10 +02001429 * synchronized. Flush is needed before reading CUBC because data in
1430 * the FIFO are not reported by CUBC. Reporting a residue of the
	 * transfer length while data remain in the FIFO can cause problems.
	 * Use case: the Atmel USART has a receive timeout, raised when
	 * characters have been received but no new character arrives for a
	 * while. On timeout, the USART driver requests the residue. If the
	 * data are still in the DMA FIFO, we would report a residue equal to
	 * the transfer length, i.e. "no data received". An application
	 * waiting for these data would then hang, since no further USART
	 * timeout can occur without new incoming data.
Cyrille Pitchen4e097822014-11-13 11:52:41 +01001439 */
1440 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1441 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
Ludovic Desrochesbe835072015-01-27 16:30:31 +01001442 if ((desc->lld.mbr_cfg & mask) == value) {
Cyrille Pitchen4e097822014-11-13 11:52:41 +01001443 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
1444 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1445 cpu_relax();
1446 }
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001447
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001448 /*
	 * The easiest way to compute the residue would be to pause the DMA,
	 * but doing so can lead to lost data, since some devices have no
	 * FIFO.
	 * We need to read several registers because:
	 * - the DMA is running, so a descriptor change is possible while
	 *   reading these registers;
	 * - when a block transfer is done, the CUBC register is reset to its
	 *   initial value until the next descriptor is fetched. This value
	 *   would corrupt the residue calculation, so we have to skip it.
1459 *
	 * INITD --------                    ------------
	 *              |____________________|
	 *       _______________________  _______________
	 * NDA       @desc2             \/   @desc3
	 *       _______________________/\_______________
	 *       __________  ___________  _______________
	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
	 *       __________/\___________/\_______________
1468 *
1469 * Since descriptors are aligned on 64 bits, we can assume that
1470 * the update of NDA and CUBC is atomic.
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001471 * Memory barriers are used to ensure the read order of the registers.
	 * A maximum number of retries is set because, although unlikely, the
	 * loop could otherwise never end.
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001473 */
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001474 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001475 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
Ludovic Desroches53398f42016-05-12 16:54:09 +02001476 rmb();
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001477 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
Ludovic Desroches53398f42016-05-12 16:54:09 +02001478 rmb();
Maxime Jayat5d151872018-02-22 12:39:55 +01001479 initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1480 rmb();
Ludovic Desroches53398f42016-05-12 16:54:09 +02001481 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1482 rmb();
1483
1484 if ((check_nda == cur_nda) && initd)
1485 break;
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001486 }
1487
1488 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1489 ret = DMA_ERROR;
1490 goto spin_unlock;
1491 }
1492
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001493 /*
Ludovic Desroches9295c412016-05-12 16:54:10 +02001494 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Another flush is needed here because CUBC is updated
	 * when the controller sends the data write command, which can lead to
	 * reporting data that have not actually been written to memory or to
	 * the device. The FIFO flush ensures that the data really have been
	 * written.
1499 */
1500 if ((desc->lld.mbr_cfg & mask) == value) {
1501 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
1502 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1503 cpu_relax();
1504 }
1505
1506 /*
	 * Subtract the size of all microblocks already transferred, including
	 * the current one, then add back the remaining size of the current
	 * microblock.
1510 */
1511 descs_list = &desc->descs_list;
1512 list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
Ludovic Desrochesbe835072015-01-27 16:30:31 +01001513 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001514 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
1515 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
1516 break;
1517 }
Ludovic Desroches25c5e962016-03-10 10:17:55 +01001518 residue += cur_ubc << dwidth;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001519
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001520 dma_set_residue(txstate, residue);
1521
1522 dev_dbg(chan2dev(chan),
Vinod Koul82e24242014-11-06 18:02:52 +05301523 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1524 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001525
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001526spin_unlock:
1527 spin_unlock_irqrestore(&atchan->lock, flags);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001528 return ret;
1529}
1530
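/*
 * Consumer-side sketch (not part of this driver, compiled out): how a
 * client such as the USART driver mentioned above would turn the reported
 * residue into a byte count. The cookie comes from a previous
 * dmaengine_submit(); example_bytes_received() is a hypothetical name.
 */
#if 0
static size_t example_bytes_received(struct dma_chan *chan,
				     dma_cookie_t cookie, size_t xfer_size)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_ERROR)
		return 0;

	/* The residue counts what is still left to transfer. */
	return xfer_size - state.residue;
}
#endif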
1531/* Call must be protected by lock. */
1532static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
1533 struct at_xdmac_desc *desc)
1534{
1535 dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1536
1537 /*
	 * Remove the transfer from the transfer list, then move the
	 * transfer's descriptors into the free descriptors list.
1540 */
1541 list_del(&desc->xfer_node);
1542 list_splice_init(&desc->descs_list, &atchan->free_descs_list);
1543}
1544
1545static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1546{
1547 struct at_xdmac_desc *desc;
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001548 unsigned long flags;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001549
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001550 spin_lock_irqsave(&atchan->lock, flags);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001551
1552 /*
	 * If the channel is enabled, do nothing: advance_work will be
	 * triggered again after the interrupt.
1555 */
1556 if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
1557 desc = list_first_entry(&atchan->xfers_list,
1558 struct at_xdmac_desc,
1559 xfer_node);
1560 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1561 if (!desc->active_xfer)
1562 at_xdmac_start_xfer(atchan, desc);
1563 }
1564
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001565 spin_unlock_irqrestore(&atchan->lock, flags);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001566}
1567
1568static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1569{
1570 struct at_xdmac_desc *desc;
1571 struct dma_async_tx_descriptor *txd;
1572
1573 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1574 txd = &desc->tx_dma_desc;
1575
Dave Jianga1d4eaa2016-07-20 13:10:42 -07001576 if (txd->flags & DMA_PREP_INTERRUPT)
1577 dmaengine_desc_get_callback_invoke(txd, NULL);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001578}
1579
1580static void at_xdmac_tasklet(unsigned long data)
1581{
1582 struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
1583 struct at_xdmac_desc *desc;
1584 u32 error_mask;
1585
Codrin Ciubotariub498cfe2019-01-23 16:33:47 +00001586 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1587 __func__, atchan->irq_status);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001588
1589 error_mask = AT_XDMAC_CIS_RBEIS
1590 | AT_XDMAC_CIS_WBEIS
1591 | AT_XDMAC_CIS_ROIS;
1592
1593 if (at_xdmac_chan_is_cyclic(atchan)) {
1594 at_xdmac_handle_cyclic(atchan);
Codrin Ciubotariub498cfe2019-01-23 16:33:47 +00001595 } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
1596 || (atchan->irq_status & error_mask)) {
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001597 struct dma_async_tx_descriptor *txd;
1598
		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
			dev_err(chan2dev(&atchan->chan), "read bus error!\n");
		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
			dev_err(chan2dev(&atchan->chan), "write bus error!\n");
		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
			dev_err(chan2dev(&atchan->chan), "request overflow error!\n");
1605
1606 spin_lock_bh(&atchan->lock);
1607 desc = list_first_entry(&atchan->xfers_list,
1608 struct at_xdmac_desc,
1609 xfer_node);
1610 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
Nicolas Ferre5c1c6e22019-04-03 12:23:57 +02001611 if (!desc->active_xfer) {
1612 dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
1613 spin_unlock_bh(&atchan->lock);
1614 return;
1615 }
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001616
1617 txd = &desc->tx_dma_desc;
1618
1619 at_xdmac_remove_xfer(atchan, desc);
1620 spin_unlock_bh(&atchan->lock);
1621
1622 if (!at_xdmac_chan_is_cyclic(atchan)) {
1623 dma_cookie_complete(txd);
Dave Jianga1d4eaa2016-07-20 13:10:42 -07001624 if (txd->flags & DMA_PREP_INTERRUPT)
1625 dmaengine_desc_get_callback_invoke(txd, NULL);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001626 }
1627
1628 dma_run_dependencies(txd);
1629
1630 at_xdmac_advance_work(atchan);
1631 }
1632}
1633
1634static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1635{
1636 struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
1637 struct at_xdmac_chan *atchan;
1638 u32 imr, status, pending;
1639 u32 chan_imr, chan_status;
1640 int i, ret = IRQ_NONE;
1641
1642 do {
1643 imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1644 status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1645 pending = status & imr;
1646
1647 dev_vdbg(atxdmac->dma.dev,
1648 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1649 __func__, status, imr, pending);
1650
1651 if (!pending)
1652 break;
1653
1654 /* We have to find which channel has generated the interrupt. */
1655 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1656 if (!((1 << i) & pending))
1657 continue;
1658
1659 atchan = &atxdmac->chan[i];
1660 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1661 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
Codrin Ciubotariub498cfe2019-01-23 16:33:47 +00001662 atchan->irq_status = chan_status & chan_imr;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001663 dev_vdbg(atxdmac->dma.dev,
1664 "%s: chan%d: imr=0x%x, status=0x%x\n",
1665 __func__, i, chan_imr, chan_status);
1666 dev_vdbg(chan2dev(&atchan->chan),
1667 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1668 __func__,
1669 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1670 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1671 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1672 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1673 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1674 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1675
Codrin Ciubotariub498cfe2019-01-23 16:33:47 +00001676 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001677 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1678
1679 tasklet_schedule(&atchan->tasklet);
1680 ret = IRQ_HANDLED;
1681 }
1682
1683 } while (pending);
1684
1685 return ret;
1686}
1687
1688static void at_xdmac_issue_pending(struct dma_chan *chan)
1689{
1690 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1691
1692 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1693
1694 if (!at_xdmac_chan_is_cyclic(atchan))
1695 at_xdmac_advance_work(atchan);
1696
}
1699
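/*
 * Consumer-side sketch (not part of this driver, compiled out):
 * issue_pending only kicks descriptors that were already submitted, so the
 * canonical client sequence is prep -> submit -> issue_pending.
 * example_start_rx() is a hypothetical name.
 */
#if 0
static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT);
	if (!tx)
		return -EINVAL;

	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* ends up in the handler above */
	return 0;
}
#endif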
Ludovic Desroches3d138872014-11-17 14:42:07 +01001700static int at_xdmac_device_config(struct dma_chan *chan,
1701 struct dma_slave_config *config)
1702{
1703 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1704 int ret;
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001705 unsigned long flags;
Ludovic Desroches3d138872014-11-17 14:42:07 +01001706
1707 dev_dbg(chan2dev(chan), "%s\n", __func__);
1708
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001709 spin_lock_irqsave(&atchan->lock, flags);
Ludovic Desroches3d138872014-11-17 14:42:07 +01001710 ret = at_xdmac_set_slave_config(chan, config);
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001711 spin_unlock_irqrestore(&atchan->lock, flags);
Ludovic Desroches3d138872014-11-17 14:42:07 +01001712
1713 return ret;
1714}
1715
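/*
 * Consumer-side sketch (not part of this driver, compiled out): filling a
 * dma_slave_config for a device-to-memory transfer and handing it to the
 * device_config handler above through dmaengine_slave_config(). The
 * register address parameter is an assumption of the example, e.g. a USART
 * receive holding register.
 */
#if 0
static int example_configure_rx(struct dma_chan *chan,
				dma_addr_t periph_rx_reg)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= periph_rx_reg,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 1,
	};

	return dmaengine_slave_config(chan, &cfg);
}
#endif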
1716static int at_xdmac_device_pause(struct dma_chan *chan)
1717{
1718 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1719 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001720 unsigned long flags;
Ludovic Desroches3d138872014-11-17 14:42:07 +01001721
1722 dev_dbg(chan2dev(chan), "%s\n", __func__);
1723
Cyrille Pitchencbb85e62015-01-27 16:30:29 +01001724 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1725 return 0;
1726
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001727 spin_lock_irqsave(&atchan->lock, flags);
Ludovic Desroches3d138872014-11-17 14:42:07 +01001728 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
Cyrille Pitchencbb85e62015-01-27 16:30:29 +01001729 while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
1730 & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1731 cpu_relax();
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001732 spin_unlock_irqrestore(&atchan->lock, flags);
Ludovic Desroches3d138872014-11-17 14:42:07 +01001733
1734 return 0;
1735}
1736
1737static int at_xdmac_device_resume(struct dma_chan *chan)
1738{
1739 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1740 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001741 unsigned long flags;
Ludovic Desroches3d138872014-11-17 14:42:07 +01001742
1743 dev_dbg(chan2dev(chan), "%s\n", __func__);
1744
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001745 spin_lock_irqsave(&atchan->lock, flags);
Niklas Cassel0434a232015-04-07 16:42:45 +02001746 if (!at_xdmac_chan_is_paused(atchan)) {
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001747 spin_unlock_irqrestore(&atchan->lock, flags);
Ludovic Desroches3d138872014-11-17 14:42:07 +01001748 return 0;
Niklas Cassel0434a232015-04-07 16:42:45 +02001749 }
Ludovic Desroches3d138872014-11-17 14:42:07 +01001750
1751 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
1752 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001753 spin_unlock_irqrestore(&atchan->lock, flags);
Ludovic Desroches3d138872014-11-17 14:42:07 +01001754
1755 return 0;
1756}
1757
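/*
 * Consumer-side sketch (not part of this driver, compiled out): the
 * pause/resume pair above is reached through the dmaengine wrappers.
 * Pausing around a residue read gives a stable snapshot on channels that
 * tolerate it; example_pause_window() is a hypothetical name.
 */
#if 0
static void example_pause_window(struct dma_chan *chan)
{
	if (dmaengine_pause(chan))
		return;		/* channel cannot be paused */

	/* ... read the residue, drain buffers, etc. ... */

	dmaengine_resume(chan);
}
#endif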
1758static int at_xdmac_device_terminate_all(struct dma_chan *chan)
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001759{
1760 struct at_xdmac_desc *desc, *_desc;
1761 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1762 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001763 unsigned long flags;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001764
Ludovic Desroches3d138872014-11-17 14:42:07 +01001765 dev_dbg(chan2dev(chan), "%s\n", __func__);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001766
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001767 spin_lock_irqsave(&atchan->lock, flags);
Ludovic Desroches3d138872014-11-17 14:42:07 +01001768 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1769 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1770 cpu_relax();
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001771
Ludovic Desroches3d138872014-11-17 14:42:07 +01001772 /* Cancel all pending transfers. */
1773 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1774 at_xdmac_remove_xfer(atchan, desc);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001775
Songjun Wu611dcad2016-01-18 11:14:44 +01001776 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
Ludovic Desroches3d138872014-11-17 14:42:07 +01001777 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001778 spin_unlock_irqrestore(&atchan->lock, flags);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001779
Ludovic Desroches3d138872014-11-17 14:42:07 +01001780 return 0;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001781}
1782
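/*
 * Consumer-side sketch (not part of this driver, compiled out): typical
 * channel teardown. dmaengine_terminate_all() lands in the terminate_all
 * handler above; releasing the channel then lets the core call
 * at_xdmac_free_chan_resources().
 */
#if 0
static void example_channel_teardown(struct dma_chan *chan)
{
	dmaengine_terminate_all(chan);	/* drop all queued transfers */
	dma_release_channel(chan);
}
#endif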
1783static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1784{
1785 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1786 struct at_xdmac_desc *desc;
1787 int i;
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001788 unsigned long flags;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001789
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001790 spin_lock_irqsave(&atchan->lock, flags);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001791
1792 if (at_xdmac_chan_is_enabled(atchan)) {
1793 dev_err(chan2dev(chan),
1794 "can't allocate channel resources (channel enabled)\n");
1795 i = -EIO;
1796 goto spin_unlock;
1797 }
1798
1799 if (!list_empty(&atchan->free_descs_list)) {
1800 dev_err(chan2dev(chan),
1801 "can't allocate channel resources (channel not free from a previous use)\n");
1802 i = -EIO;
1803 goto spin_unlock;
1804 }
1805
1806 for (i = 0; i < init_nr_desc_per_channel; i++) {
1807 desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
1808 if (!desc) {
1809 dev_warn(chan2dev(chan),
1810 "only %d descriptors have been allocated\n", i);
1811 break;
1812 }
1813 list_add_tail(&desc->desc_node, &atchan->free_descs_list);
1814 }
1815
1816 dma_cookie_init(chan);
1817
1818 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1819
1820spin_unlock:
Ludovic Desroches4c374fc2015-06-08 10:33:14 +02001821 spin_unlock_irqrestore(&atchan->lock, flags);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001822 return i;
1823}
1824
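/*
 * Consumer-side sketch (not part of this driver, compiled out): requesting
 * a channel is what makes the dmaengine core call
 * at_xdmac_alloc_chan_resources() above. "rx" is a hypothetical dma-names
 * entry from the client's device tree node.
 */
#if 0
static struct dma_chan *example_get_channel(struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		dev_warn(dev, "no DMA channel available\n");

	return chan;
}
#endif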
1825static void at_xdmac_free_chan_resources(struct dma_chan *chan)
1826{
1827 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1828 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
1829 struct at_xdmac_desc *desc, *_desc;
1830
1831 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
1832 dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
1833 list_del(&desc->desc_node);
1834 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
1835 }
1836
}
1839
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001840#ifdef CONFIG_PM
1841static int atmel_xdmac_prepare(struct device *dev)
1842{
1843 struct platform_device *pdev = to_platform_device(dev);
1844 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1845 struct dma_chan *chan, *_chan;
1846
1847 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1848 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1849
1850 /* Wait for transfer completion, except in cyclic case. */
1851 if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
1852 return -EAGAIN;
1853 }
1854 return 0;
1855}
1856#else
1857# define atmel_xdmac_prepare NULL
1858#endif
1859
1860#ifdef CONFIG_PM_SLEEP
1861static int atmel_xdmac_suspend(struct device *dev)
1862{
1863 struct platform_device *pdev = to_platform_device(dev);
1864 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1865 struct dma_chan *chan, *_chan;
1866
1867 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1868 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1869
Ludovic Desroches734bb9a2015-01-27 16:30:30 +01001870 atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001871 if (at_xdmac_chan_is_cyclic(atchan)) {
1872 if (!at_xdmac_chan_is_paused(atchan))
Ludovic Desroches3d138872014-11-17 14:42:07 +01001873 at_xdmac_device_pause(chan);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001874 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1875 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
1876 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
1877 }
1878 }
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
1880
1881 at_xdmac_off(atxdmac);
1882 clk_disable_unprepare(atxdmac->clk);
1883 return 0;
1884}
1885
1886static int atmel_xdmac_resume(struct device *dev)
1887{
1888 struct platform_device *pdev = to_platform_device(dev);
1889 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1890 struct at_xdmac_chan *atchan;
1891 struct dma_chan *chan, *_chan;
1892 int i;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001893
1894 clk_prepare_enable(atxdmac->clk);
1895
1896 /* Clear pending interrupts. */
1897 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1898 atchan = &atxdmac->chan[i];
1899 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
1900 cpu_relax();
1901 }
1902
1903 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
1904 at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
1905 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1906 atchan = to_at_xdmac_chan(chan);
Ludovic Desroches734bb9a2015-01-27 16:30:30 +01001907 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001908 if (at_xdmac_chan_is_cyclic(atchan)) {
Songjun Wu611dcad2016-01-18 11:14:44 +01001909 if (at_xdmac_chan_is_paused(atchan))
1910 at_xdmac_device_resume(chan);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001911 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
1912 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
1913 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
1914 wmb();
1915 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
1916 }
1917 }
1918 return 0;
1919}
1920#endif /* CONFIG_PM_SLEEP */
1921
1922static int at_xdmac_probe(struct platform_device *pdev)
1923{
1924 struct resource *res;
1925 struct at_xdmac *atxdmac;
1926 int irq, size, nr_channels, i, ret;
1927 void __iomem *base;
1928 u32 reg;
1929
1930 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1931 if (!res)
1932 return -EINVAL;
1933
1934 irq = platform_get_irq(pdev, 0);
1935 if (irq < 0)
1936 return irq;
1937
1938 base = devm_ioremap_resource(&pdev->dev, res);
1939 if (IS_ERR(base))
1940 return PTR_ERR(base);
1941
1942 /*
	 * Read the number of XDMAC channels. The read helper function cannot
	 * be used since atxdmac is not yet allocated, and we need to know the
	 * number of channels to size that allocation.
1946 */
1947 reg = readl_relaxed(base + AT_XDMAC_GTYPE);
1948 nr_channels = AT_XDMAC_NB_CH(reg);
1949 if (nr_channels > AT_XDMAC_MAX_CHAN) {
1950 dev_err(&pdev->dev, "invalid number of channels (%u)\n",
1951 nr_channels);
1952 return -EINVAL;
1953 }
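	/*
	 * Worked example of the GTYPE decode (made-up register value): if
	 * AT_XDMAC_GTYPE reads 0x0032400F, then
	 *   AT_XDMAC_NB_CH(reg)   = (0x0F & 0x1F) + 1        = 16 channels,
	 *   AT_XDMAC_FIFO_SZ(reg) = (reg >> 5) & 0x7FF       = 512 bytes,
	 *   AT_XDMAC_NB_REQ(reg)  = ((reg >> 16) & 0x3F) + 1 = 51 requests.
	 */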
1954
1955 size = sizeof(*atxdmac);
1956 size += nr_channels * sizeof(struct at_xdmac_chan);
1957 atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1958 if (!atxdmac) {
1959 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
1960 return -ENOMEM;
1961 }
1962
1963 atxdmac->regs = base;
1964 atxdmac->irq = irq;
1965
1966 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
1967 if (IS_ERR(atxdmac->clk)) {
1968 dev_err(&pdev->dev, "can't get dma_clk\n");
1969 return PTR_ERR(atxdmac->clk);
1970 }
1971
	/* Do not use devres here, to prevent races with the tasklet. */
1973 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
1974 if (ret) {
1975 dev_err(&pdev->dev, "can't request irq\n");
1976 return ret;
1977 }
1978
1979 ret = clk_prepare_enable(atxdmac->clk);
1980 if (ret) {
1981 dev_err(&pdev->dev, "can't prepare or enable clock\n");
1982 goto err_free_irq;
1983 }
1984
1985 atxdmac->at_xdmac_desc_pool =
1986 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
1987 sizeof(struct at_xdmac_desc), 4, 0);
1988 if (!atxdmac->at_xdmac_desc_pool) {
1989 dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
1990 ret = -ENOMEM;
1991 goto err_clk_disable;
1992 }
1993
1994 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
Maxime Ripard6007ccb2015-05-07 17:38:11 +02001995 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001996 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
Maxime Ripardb206d9a2015-05-18 13:46:16 +02001997 dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
Maxime Ripard67a6eed2015-07-06 12:19:24 +02001998 dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02001999 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
Ludovic Desrochesfef4cbf2014-11-13 11:52:45 +01002000 /*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel: the second allocation fails in private_candidate().
2003 */
2004 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02002005 atxdmac->dma.dev = &pdev->dev;
2006 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
2007 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
2008 atxdmac->dma.device_tx_status = at_xdmac_tx_status;
2009 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
2010 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
Maxime Ripard6007ccb2015-05-07 17:38:11 +02002011 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02002012 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
Maxime Ripardb206d9a2015-05-18 13:46:16 +02002013 atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
Maxime Ripard67a6eed2015-07-06 12:19:24 +02002014 atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02002015 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
Ludovic Desroches3d138872014-11-17 14:42:07 +01002016 atxdmac->dma.device_config = at_xdmac_device_config;
2017 atxdmac->dma.device_pause = at_xdmac_device_pause;
2018 atxdmac->dma.device_resume = at_xdmac_device_resume;
2019 atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
Ludovic Desroches8ac82f82014-11-17 14:42:44 +01002020 atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2021 atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2022 atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2023 atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02002024
2025 /* Disable all chans and interrupts. */
2026 at_xdmac_off(atxdmac);
2027
2028 /* Init channels. */
2029 INIT_LIST_HEAD(&atxdmac->dma.channels);
2030 for (i = 0; i < nr_channels; i++) {
2031 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2032
2033 atchan->chan.device = &atxdmac->dma;
2034 list_add_tail(&atchan->chan.device_node,
2035 &atxdmac->dma.channels);
2036
2037 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
2038 atchan->mask = 1 << i;
2039
2040 spin_lock_init(&atchan->lock);
2041 INIT_LIST_HEAD(&atchan->xfers_list);
2042 INIT_LIST_HEAD(&atchan->free_descs_list);
2043 tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
2044 (unsigned long)atchan);
2045
2046 /* Clear pending interrupts. */
2047 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2048 cpu_relax();
2049 }
2050 platform_set_drvdata(pdev, atxdmac);
2051
2052 ret = dma_async_device_register(&atxdmac->dma);
2053 if (ret) {
2054 dev_err(&pdev->dev, "fail to register DMA engine device\n");
2055 goto err_clk_disable;
2056 }
2057
2058 ret = of_dma_controller_register(pdev->dev.of_node,
2059 at_xdmac_xlate, atxdmac);
2060 if (ret) {
2061 dev_err(&pdev->dev, "could not register of dma controller\n");
2062 goto err_dma_unregister;
2063 }
2064
2065 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
2066 nr_channels, atxdmac->regs);
2067
2068 return 0;
2069
2070err_dma_unregister:
2071 dma_async_device_unregister(&atxdmac->dma);
2072err_clk_disable:
2073 clk_disable_unprepare(atxdmac->clk);
2074err_free_irq:
Wei Yongjun6a8b0c62016-08-10 03:17:09 +00002075 free_irq(atxdmac->irq, atxdmac);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02002076 return ret;
2077}
2078
2079static int at_xdmac_remove(struct platform_device *pdev)
2080{
2081 struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2082 int i;
2083
2084 at_xdmac_off(atxdmac);
2085 of_dma_controller_free(pdev->dev.of_node);
2086 dma_async_device_unregister(&atxdmac->dma);
2087 clk_disable_unprepare(atxdmac->clk);
2088
Wei Yongjun6a8b0c62016-08-10 03:17:09 +00002089 free_irq(atxdmac->irq, atxdmac);
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02002090
2091 for (i = 0; i < atxdmac->dma.chancnt; i++) {
2092 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2093
2094 tasklet_kill(&atchan->tasklet);
2095 at_xdmac_free_chan_resources(&atchan->chan);
2096 }
2097
2098 return 0;
2099}
2100
2101static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
2102 .prepare = atmel_xdmac_prepare,
2103 SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
2104};
2105
2106static const struct of_device_id atmel_xdmac_dt_ids[] = {
2107 {
2108 .compatible = "atmel,sama5d4-dma",
2109 }, {
2110 /* sentinel */
2111 }
2112};
2113MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
2114
2115static struct platform_driver at_xdmac_driver = {
2116 .probe = at_xdmac_probe,
2117 .remove = at_xdmac_remove,
2118 .driver = {
2119 .name = "at_xdmac",
Ludovic Desrochese1f7c9e2014-10-22 17:22:18 +02002120 .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
2121 .pm = &atmel_xdmac_dev_pm_ops,
2122 }
2123};
2124
2125static int __init at_xdmac_init(void)
2126{
2127 return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
2128}
2129subsys_initcall(at_xdmac_init);
2130
2131MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
2132MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
2133MODULE_LICENSE("GPL");