/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER			0x00	/* 64 bits */
#define SH_ECR			0x08	/* 64 bits */
#define SH_ESR			0x10	/* 64 bits */
#define SH_CER			0x18	/* 64 bits */
#define SH_EER			0x20	/* 64 bits */
#define SH_EECR			0x28	/* 64 bits */
#define SH_EESR			0x30	/* 64 bits */
#define SH_SER			0x38	/* 64 bits */
#define SH_SECR			0x40	/* 64 bits */
#define SH_IER			0x50	/* 64 bits */
#define SH_IECR			0x58	/* 64 bits */
#define SH_IESR			0x60	/* 64 bits */
#define SH_IPR			0x68	/* 64 bits */
#define SH_ICR			0x70	/* 64 bits */
#define SH_IEVAL		0x78
#define SH_QER			0x80
#define SH_QEER			0x84
#define SH_QEECR		0x88
#define SH_QEESR		0x8c
#define SH_QSER			0x90
#define SH_QSECR		0x94
#define SH_SIZE			0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV		0x0000
#define EDMA_CCCFG		0x0004
#define EDMA_QCHMAP		0x0200	/* 8 registers */
#define EDMA_DMAQNUM		0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM		0x0260
#define EDMA_QUETCMAP		0x0280
#define EDMA_QUEPRI		0x0284
#define EDMA_EMR		0x0300	/* 64 bits */
#define EDMA_EMCR		0x0308	/* 64 bits */
#define EDMA_QEMR		0x0310
#define EDMA_QEMCR		0x0314
#define EDMA_CCERR		0x0318
#define EDMA_CCERRCLR		0x031c
#define EDMA_EEVAL		0x0320
#define EDMA_DRAE		0x0340	/* 4 x 64 bits */
#define EDMA_QRAE		0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT		0x0600	/* 2 registers */
#define EDMA_QWMTHRA		0x0620
#define EDMA_QWMTHRB		0x0624
#define EDMA_CCSTAT		0x0640

#define EDMA_M			0x1000	/* global channel registers */
#define EDMA_ECR		0x1008
#define EDMA_ECRH		0x100C
#define EDMA_SHADOW0		0x2000	/* 4 shadow regions */
#define EDMA_PARM		0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP		0x0100	/* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003

/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)

struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of transfers programmed to the EDMA
	 * controller, whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * which is required to update the residue during the intermediate
	 * transfer completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_qchannels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	bool				unused_chan_list_done;
	/*
	 * The slot_inuse bit for each PaRAM slot is clear unless the
	 * slot is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	unsigned long *slot_inuse;

	/*
	 * The channel_unused bit for each channel is clear unless
	 * it is not being used on this platform. It is set up by
	 * SoC-specific initialization code.
	 */
	unsigned long *channel_unused;

	struct dma_device		dma_slave;
	struct edma_chan		*slave_chans;
	int				dummy_slot;
};

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};

static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
					   int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
				  unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
				 unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}

static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}

static void edma_set_chmap(struct edma_chan *echan, int slot)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}

static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct edma_cc *ecc = data;
	int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
	int dma_req_max = dma_req_min + ecc->num_channels;
	int i, count;
	struct of_phandle_args dma_spec;

	if (dev->of_node) {
		struct platform_device *dma_pdev;

		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;
		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			dma_pdev = of_find_device_by_node(dma_spec.np);
			if (&dma_pdev->dev != ecc->dev)
				continue;

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  ecc->channel_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];
		int dma_req;

		if (!(res->flags & IORESOURCE_DMA))
			continue;

		dma_req = (int)res->start;
		if (dma_req >= dma_req_min && dma_req < dma_req_max)
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  ecc->channel_unused);
	}

	return 0;
}

static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
					 BIT(channel & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
					 BIT(channel & 0x1f));
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
					 BIT(channel & 0x1f));
	}
}

/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot > 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}

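/*
 * Illustrative sketch (not part of the driver): the typical pairing of
 * the slot helpers when an extra link slot is needed. The my_param set
 * is hypothetical and error handling is elided.
 *
 *	int slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
 *	if (slot >= 0) {
 *		edma_write_slot(ecc, slot, &my_param);
 *		...use it, e.g. as an edma_link() target...
 *		edma_free_slot(ecc, slot);
 *	}
 *
 * Note that the returned slot number also encodes the controller id,
 * which is why the helpers strip it again with EDMA_CHAN_SLOT().
 */
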
static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}

/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			  PARM_OFFSET(to));
}

/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}

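/*
 * Sketch of how the live position can feed residue bookkeeping
 * (illustration only; the pos/done names are hypothetical):
 *
 *	dma_addr_t pos = edma_get_position(ecc, echan->slot[0], dst);
 *	u32 done = pos - edesc->pset[0].addr;	// bytes moved so far
 */
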
/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	if (test_bit(channel, ecc->channel_unused)) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ESR, j));
		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
	} else {
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));
	}
}

static void edma_stop(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write_array(ecc, EDMA_EMCR, j, mask);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, j, mask);

	dev_dbg(ecc->dev, "EER%d %08x\n", j,
		edma_shadow0_read_array(ecc, SH_EER, j));

	/*
	 * REVISIT: consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
}

/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
}

/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
}

static void edma_trigger_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
}

static void edma_clean_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, j, mask);
	/* Clear any SER */
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}

/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	eventq_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  eventq_no << bit);
}

static int edma_alloc_channel(struct edma_chan *echan,
			      enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (!ecc->unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		int ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
					   prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		ecc->unused_chan_list_done = true;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(echan);

	edma_setup_interrupt(echan, true);

	edma_assign_channel_eventq(echan, eventq_no);

	return 0;
}

static void edma_free_channel(struct edma_chan *echan)
{
	/* ensure no events are pending */
	edma_stop(echan);
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);
}

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 " chnum\t%d\n"
			 " slot\t%d\n"
			 " opt\t%08x\n"
			 " src\t%08x\n"
			 " dst\t%08x\n"
			 " abcnt\t%08x\n"
			 " ccnt\t%08x\n"
			 " bidx\t%08x\n"
			 " cidx\t%08x\n"
			 " lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, then set
	 * up a link to the dummy slot. This results in all future events
	 * being absorbed, which is OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan);
	}
}

static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}

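/*
 * Sketch of how a dmaengine client would exercise the config above
 * (illustration only, not part of this driver; fifo_phys_addr, buf_dma
 * and len are hypothetical). Note that 8-byte bus widths would be
 * rejected by the check above.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
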
static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan);
	return 0;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of acnt, how much to send per event
 * @acnt: The transfer unit size in bytes (the device FIFO width for
 *	slave transfers)
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder and
		 * quotient, respectively, of (dma_length / acnt) divided by
		 * (SZ_64K - 1), so that if bcnt overflows we have ccnt to
		 * use. Note: bcntrld is used only in A-sync transfers, and
		 * it only applies for sg_dma_len(sg) >= SZ_64K. In that case
		 * bcnt for the first frame is the remainder computed below,
		 * and every successive frame uses bcnt = SZ_64K - 1; this is
		 * assured because bcntrld is set to 0xffff at the end of
		 * this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

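	/*
	 * Worked example with made-up numbers: dma_length = 70000 and
	 * acnt = 1 with burst = 1 gives ccnt = 70000 / 65535 = 1 and
	 * bcnt = 70000 - 65535 = 4465; the remainder is non-zero, so
	 * ccnt becomes 2. The first frame moves 4465 bytes, then BCNTRLD
	 * reloads bcnt to SZ_64K - 1 for the remaining frame.
	 */
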
	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * Only time when (bcntrld) auto reload is required is for
	 * A-sync case, and in this case, a requirement of reload value
	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
	 * and then later will be populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/*
		 * If this is the last in a current SG set of transactions,
		 * enable interrupts so that next set is processed
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret, nslots;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len;

	if (unlikely(!echan || !len))
		return NULL;

	if (len < SZ_64K) {
		/*
		 * Transfer size less than 64K can be handled with one paRAM
		 * slot and with one burst.
		 * ACNT = length
		 */
		width = len;
		pset_len = len;
		nslots = 1;
	} else {
		/*
		 * Transfer size bigger than 64K will be handled with a
		 * maximum of two paRAM slots.
		 * slot1: (full_length / 32767) bursts of 32767 bytes each.
		 *        ACNT = 32767, length1 = (full_length / 32767) * 32767
		 * slot2: the remaining amount of data after slot1.
		 *        ACNT = full_length - length1, length2 = ACNT
		 *
		 * When the full_length is a multiple of 32767, one slot is
		 * enough to complete the transfer.
		 */
		width = SZ_32K - 1;
		pset_len = rounddown(len, width);
		/* One slot is enough for lengths multiple of (SZ_32K -1) */
		if (unlikely(pset_len == len))
			nslots = 1;
		else
			nslots = 2;
	}

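	/*
	 * Worked example with made-up numbers: len = 100000. Then
	 * width = SZ_32K - 1 = 32767 and pset_len = rounddown(100000,
	 * 32767) = 98301, so slot 0 moves three 32767-byte bursts and a
	 * second slot moves the remaining 1699 bytes. A len of 65534
	 * (exactly 2 * 32767) would round down to itself and need only
	 * one slot.
	 */
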
	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
	if (nslots == 1) {
		/* Enable transfer complete interrupt */
		edesc->pset[0].param.opt |= TCINTEN;
	} else {
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
		dest += pset_len;
		src += pset_len;
		pset_len = width = len % (SZ_32K - 1);

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->pset[1].param.opt |= ITCCHEN;
		edesc->pset[1].param.opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
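	/*
	 * Example with hypothetical audio numbers: buf_len = 40960 and
	 * period_len = 4096 give nslots = 11, which fits within
	 * MAX_NR_SG (20); 40 periods of 1024 bytes (nslots = 41) would not.
	 */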
1322 if (nslots > MAX_NR_SG)
1323 return NULL;
1324
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001325 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1326 GFP_ATOMIC);
Joel Fernandes50a9c702013-10-31 16:31:23 -05001327 if (!edesc) {
Peter Ujfalusic594c892014-04-14 14:42:03 +03001328 dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
Joel Fernandes50a9c702013-10-31 16:31:23 -05001329 return NULL;
1330 }
1331
1332 edesc->cyclic = 1;
1333 edesc->pset_nr = nslots;
Thomas Gleixner740b41f2014-04-28 14:34:11 -05001334 edesc->residue = edesc->residue_stat = buf_len;
Thomas Gleixnerc2da2342014-04-28 14:29:57 -05001335 edesc->direction = direction;
Thomas Gleixner740b41f2014-04-28 14:34:11 -05001336 edesc->echan = echan;
Joel Fernandes50a9c702013-10-31 16:31:23 -05001337
Peter Ujfalusi83bb3122014-04-14 14:42:02 +03001338 dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
1339 __func__, echan->ch_num, nslots, period_len, buf_len);
Joel Fernandes50a9c702013-10-31 16:31:23 -05001340
1341 for (i = 0; i < nslots; i++) {
1342 /* Allocate a PaRAM slot, if needed */
1343 if (echan->slot[i] < 0) {
1344 echan->slot[i] =
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001345 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
Joel Fernandes50a9c702013-10-31 16:31:23 -05001346 if (echan->slot[i] < 0) {
Christian Engelmayere3ddc972013-12-30 20:48:39 +01001347 kfree(edesc);
Peter Ujfalusic594c892014-04-14 14:42:03 +03001348 dev_err(dev, "%s: Failed to allocate slot\n",
1349 __func__);
Joel Fernandes50a9c702013-10-31 16:31:23 -05001350 return NULL;
1351 }
1352 }
1353
1354 if (i == nslots - 1) {
1355 memcpy(&edesc->pset[i], &edesc->pset[0],
1356 sizeof(edesc->pset[0]));
1357 break;
1358 }
1359
1360 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1361 dst_addr, burst, dev_width, period_len,
1362 direction);
Christian Engelmayere3ddc972013-12-30 20:48:39 +01001363 if (ret < 0) {
1364 kfree(edesc);
Joel Fernandes50a9c702013-10-31 16:31:23 -05001365 return NULL;
Christian Engelmayere3ddc972013-12-30 20:48:39 +01001366 }
Joel Fernandes50a9c702013-10-31 16:31:23 -05001367
1368 if (direction == DMA_DEV_TO_MEM)
1369 dst_addr += period_len;
1370 else
1371 src_addr += period_len;
1372
Peter Ujfalusi83bb3122014-04-14 14:42:02 +03001373 dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
1374 dev_vdbg(dev,
Joel Fernandes50a9c702013-10-31 16:31:23 -05001375 "\n pset[%d]:\n"
1376 " chnum\t%d\n"
1377 " slot\t%d\n"
1378 " opt\t%08x\n"
1379 " src\t%08x\n"
1380 " dst\t%08x\n"
1381 " abcnt\t%08x\n"
1382 " ccnt\t%08x\n"
1383 " bidx\t%08x\n"
1384 " cidx\t%08x\n"
1385 " lkrld\t%08x\n",
1386 i, echan->ch_num, echan->slot[i],
Thomas Gleixnerb5088ad2014-04-28 14:23:55 -05001387 edesc->pset[i].param.opt,
1388 edesc->pset[i].param.src,
1389 edesc->pset[i].param.dst,
1390 edesc->pset[i].param.a_b_cnt,
1391 edesc->pset[i].param.ccnt,
1392 edesc->pset[i].param.src_dst_bidx,
1393 edesc->pset[i].param.src_dst_cidx,
1394 edesc->pset[i].param.link_bcntrld);
Joel Fernandes50a9c702013-10-31 16:31:23 -05001395
1396 edesc->absync = ret;
1397
1398 /*
Peter Ujfalusia1f146f2014-07-16 15:29:21 +03001399 * Enable period interrupt only if it is requested
Joel Fernandes50a9c702013-10-31 16:31:23 -05001400 */
Peter Ujfalusia1f146f2014-07-16 15:29:21 +03001401 if (tx_flags & DMA_PREP_INTERRUPT)
1402 edesc->pset[i].param.opt |= TCINTEN;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001403 }
1404
Peter Ujfalusi8e8805d2014-07-08 13:46:38 +03001405	/* Place the cyclic channel in the highest priority queue */
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001406 edma_assign_channel_eventq(echan, EVENTQ_0);
Peter Ujfalusi8e8805d2014-07-08 13:46:38 +03001407
Matt Porterc2dde5f2012-08-22 21:09:34 -04001408 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1409}
1410
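/*
 * A minimal client-side sketch for the cyclic API above (typical for
 * audio), illustrative only; buf_dma, period_bytes and the callback
 * name are hypothetical:
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * period_bytes,
 *				       period_bytes, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	tx->callback = my_period_elapsed;	/* called once per period */
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */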
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001411static void edma_completion_handler(struct edma_chan *echan)
Matt Porterc2dde5f2012-08-22 21:09:34 -04001412{
Matt Porterc2dde5f2012-08-22 21:09:34 -04001413 struct device *dev = echan->vchan.chan.device->dev;
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001414 struct edma_desc *edesc = echan->edesc;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001415
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001416 if (!edesc)
1417 return;
Joel Fernandes50a9c702013-10-31 16:31:23 -05001418
Peter Ujfalusi8fa7ff42015-10-14 14:42:45 +03001419 spin_lock(&echan->vchan.lock);
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001420 if (edesc->cyclic) {
1421 vchan_cyclic_callback(&edesc->vdesc);
1422 spin_unlock(&echan->vchan.lock);
1423 return;
1424 } else if (edesc->processed == edesc->pset_nr) {
1425 edesc->residue = 0;
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001426 edma_stop(echan);
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001427 vchan_cookie_complete(&edesc->vdesc);
1428 echan->edesc = NULL;
Thomas Gleixner740b41f2014-04-28 14:34:11 -05001429
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001430 dev_dbg(dev, "Transfer completed on channel %d\n",
1431 echan->ch_num);
1432 } else {
1433 dev_dbg(dev, "Sub transfer completed on channel %d\n",
1434 echan->ch_num);
Peter Ujfalusi8fa7ff42015-10-14 14:42:45 +03001435
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001436 edma_pause(echan);
Joel Fernandesc5f47992013-08-29 18:05:43 -05001437
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001438 /* Update statistics for tx_status */
1439 edesc->residue -= edesc->sg_len;
1440 edesc->residue_stat = edesc->residue;
1441 edesc->processed_stat = edesc->processed;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001442 }
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001443 edma_execute(echan);
1444
Peter Ujfalusi8fa7ff42015-10-14 14:42:45 +03001445 spin_unlock(&echan->vchan.lock);
Matt Porterc2dde5f2012-08-22 21:09:34 -04001446}
1447
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001448/* eDMA interrupt handler */
1449static irqreturn_t dma_irq_handler(int irq, void *data)
1450{
1451 struct edma_cc *ecc = data;
1452 int ctlr;
1453 u32 sh_ier;
1454 u32 sh_ipr;
1455 u32 bank;
1456
1457 ctlr = ecc->id;
1458 if (ctlr < 0)
1459 return IRQ_NONE;
1460
1461 dev_vdbg(ecc->dev, "dma_irq_handler\n");
1462
1463 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
1464 if (!sh_ipr) {
1465 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
1466 if (!sh_ipr)
1467 return IRQ_NONE;
1468 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
1469 bank = 1;
1470 } else {
1471 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
1472 bank = 0;
1473 }
1474
1475 do {
1476 u32 slot;
1477 u32 channel;
1478
1479 slot = __ffs(sh_ipr);
1480 sh_ipr &= ~(BIT(slot));
1481
1482 if (sh_ier & BIT(slot)) {
1483 channel = (bank << 5) | slot;
1484 /* Clear the corresponding IPR bits */
1485 edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
1486 edma_completion_handler(&ecc->slave_chans[channel]);
1487 }
1488 } while (sh_ipr);
1489
1490 edma_shadow0_write(ecc, SH_IEVAL, 1);
1491 return IRQ_HANDLED;
1492}
1493
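/*
 * Note on the decode above: the shadow region exposes the pending
 * interrupts as two 32-bit banks (IPR/IPRH) covering 64 channels, so
 * the channel number is reassembled as (bank << 5) | slot; e.g. bit 3
 * pending in bank 1 maps to channel 35.
 */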
1494static void edma_error_handler(struct edma_chan *echan)
1495{
1496 struct edma_cc *ecc = echan->ecc;
1497 struct device *dev = echan->vchan.chan.device->dev;
1498 struct edmacc_param p;
1499
1500 if (!echan->edesc)
1501 return;
1502
1503 spin_lock(&echan->vchan.lock);
1504
1505 edma_read_slot(ecc, echan->slot[0], &p);
1506 /*
1507	 * Issue later, based on the missed flag; this is certain to
1508	 * happen because either:
1509	 * (1) we finished transmitting an intermediate slot and
1510	 * edma_execute is coming up, or
1511	 * (2) we finished the current transfer and the next issue
1512	 * will call edma_execute.
1513	 *
1514	 * Important note: issuing here can be dangerous and lead to
1515	 * some nasty recursion when we are in a NULL slot, so we
1516	 * avoid doing so and just set the missed flag instead.
1517 */
1518 if (p.a_b_cnt == 0 && p.ccnt == 0) {
1519 dev_dbg(dev, "Error on null slot, setting miss\n");
1520 echan->missed = 1;
1521 } else {
1522 /*
1523 * The slot is already programmed but the event got
1524		 * missed, so it's safe to issue it here.
1525 */
1526 dev_dbg(dev, "Missed event, TRIGGERING\n");
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001527 edma_clean_channel(echan);
1528 edma_stop(echan);
1529 edma_start(echan);
1530 edma_trigger_channel(echan);
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001531 }
1532 spin_unlock(&echan->vchan.lock);
1533}
1534
Peter Ujfalusi7c3b8b32015-10-14 14:43:02 +03001535static inline bool edma_error_pending(struct edma_cc *ecc)
1536{
1537 if (edma_read_array(ecc, EDMA_EMR, 0) ||
1538 edma_read_array(ecc, EDMA_EMR, 1) ||
1539 edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
1540 return true;
1541
1542 return false;
1543}
1544
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001545/* eDMA error interrupt handler */
1546static irqreturn_t dma_ccerr_handler(int irq, void *data)
1547{
1548 struct edma_cc *ecc = data;
Peter Ujfalusie4402a12015-10-14 14:43:03 +03001549 int i, j;
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001550 int ctlr;
1551 unsigned int cnt = 0;
Peter Ujfalusie4402a12015-10-14 14:43:03 +03001552 unsigned int val;
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001553
1554 ctlr = ecc->id;
1555 if (ctlr < 0)
1556 return IRQ_NONE;
1557
1558 dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
1559
Peter Ujfalusi7c3b8b32015-10-14 14:43:02 +03001560 if (!edma_error_pending(ecc))
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001561 return IRQ_NONE;
1562
1563 while (1) {
Peter Ujfalusie4402a12015-10-14 14:43:03 +03001564 /* Event missed register(s) */
1565 for (j = 0; j < 2; j++) {
1566 unsigned long emr;
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001567
Peter Ujfalusie4402a12015-10-14 14:43:03 +03001568 val = edma_read_array(ecc, EDMA_EMR, j);
1569 if (!val)
1570 continue;
1571
1572 dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
1573 emr = val;
1574 for (i = find_next_bit(&emr, 32, 0); i < 32;
1575 i = find_next_bit(&emr, 32, i + 1)) {
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001576 int k = (j << 5) + i;
1577
Peter Ujfalusie4402a12015-10-14 14:43:03 +03001578 /* Clear the corresponding EMR bits */
1579 edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
1580 /* Clear any SER */
1581 edma_shadow0_write_array(ecc, SH_SECR, j,
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001582 BIT(i));
Peter Ujfalusie4402a12015-10-14 14:43:03 +03001583 edma_error_handler(&ecc->slave_chans[k]);
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001584 }
1585 }
Peter Ujfalusie4402a12015-10-14 14:43:03 +03001586
1587 val = edma_read(ecc, EDMA_QEMR);
1588 if (val) {
1589 dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
1590 /* Not reported, just clear the interrupt reason. */
1591 edma_write(ecc, EDMA_QEMCR, val);
1592 edma_shadow0_write(ecc, SH_QSECR, val);
1593 }
1594
1595 val = edma_read(ecc, EDMA_CCERR);
1596 if (val) {
1597 dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
1598 /* Not reported, just clear the interrupt reason. */
1599 edma_write(ecc, EDMA_CCERRCLR, val);
1600 }
1601
Peter Ujfalusi7c3b8b32015-10-14 14:43:02 +03001602 if (!edma_error_pending(ecc))
Peter Ujfalusi79ad2e32015-10-14 14:43:01 +03001603 break;
1604 cnt++;
1605 if (cnt > 10)
1606 break;
1607 }
1608 edma_write(ecc, EDMA_EEVAL, 1);
1609 return IRQ_HANDLED;
1610}
1611
Matt Porterc2dde5f2012-08-22 21:09:34 -04001612/* Alloc channel resources */
1613static int edma_alloc_chan_resources(struct dma_chan *chan)
1614{
1615 struct edma_chan *echan = to_edma_chan(chan);
1616 struct device *dev = chan->device->dev;
1617 int ret;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001618
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001619 ret = edma_alloc_channel(echan, EVENTQ_DEFAULT);
1620 if (ret)
1621 return ret;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001622
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03001623 echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num);
1624 if (echan->slot[0] < 0) {
1625 dev_err(dev, "Entry slot allocation failed for channel %u\n",
1626 EDMA_CHAN_SLOT(echan->ch_num));
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001627 goto err_slot;
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03001628 }
1629
1630 /* Set up channel -> slot mapping for the entry slot */
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001631 edma_set_chmap(echan, echan->slot[0]);
1632 echan->alloced = true;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001633
Peter Ujfalusi9aac9092014-04-24 10:29:50 +03001634 dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
Ezequiel Garcia0e772c62013-12-13 11:06:18 -03001635 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
Matt Porterc2dde5f2012-08-22 21:09:34 -04001636
1637 return 0;
1638
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001639err_slot:
1640 edma_free_channel(echan);
Matt Porterc2dde5f2012-08-22 21:09:34 -04001641 return ret;
1642}
1643
1644/* Free channel resources */
1645static void edma_free_chan_resources(struct dma_chan *chan)
1646{
1647 struct edma_chan *echan = to_edma_chan(chan);
Matt Porterc2dde5f2012-08-22 21:09:34 -04001648 int i;
1649
1650 /* Terminate transfers */
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001651 edma_stop(echan);
Matt Porterc2dde5f2012-08-22 21:09:34 -04001652
1653 vchan_free_chan_resources(&echan->vchan);
1654
1655 /* Free EDMA PaRAM slots */
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03001656 for (i = 0; i < EDMA_MAX_SLOTS; i++) {
Matt Porterc2dde5f2012-08-22 21:09:34 -04001657 if (echan->slot[i] >= 0) {
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001658 edma_free_slot(echan->ecc, echan->slot[i]);
Matt Porterc2dde5f2012-08-22 21:09:34 -04001659 echan->slot[i] = -1;
1660 }
1661 }
1662
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03001663 /* Set entry slot to the dummy slot */
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001664 edma_set_chmap(echan, echan->ecc->dummy_slot);
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03001665
Matt Porterc2dde5f2012-08-22 21:09:34 -04001666 /* Free EDMA channel */
1667 if (echan->alloced) {
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03001668 edma_free_channel(echan);
Matt Porterc2dde5f2012-08-22 21:09:34 -04001669 echan->alloced = false;
1670 }
1671
Peter Ujfalusi907f74a2015-10-14 14:42:56 +03001672 dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num);
Matt Porterc2dde5f2012-08-22 21:09:34 -04001673}
1674
1675/* Send pending descriptor to hardware */
1676static void edma_issue_pending(struct dma_chan *chan)
1677{
1678 struct edma_chan *echan = to_edma_chan(chan);
1679 unsigned long flags;
1680
1681 spin_lock_irqsave(&echan->vchan.lock, flags);
1682 if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
1683 edma_execute(echan);
1684 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1685}
1686
Thomas Gleixner740b41f2014-04-28 14:34:11 -05001687static u32 edma_residue(struct edma_desc *edesc)
1688{
1689 bool dst = edesc->direction == DMA_DEV_TO_MEM;
1690 struct edma_pset *pset = edesc->pset;
1691 dma_addr_t done, pos;
1692 int i;
1693
1694 /*
1695	 * We always read the dst/src position from the first PaRAM
1696	 * set (pset). That's the one which is active now.
1697 */
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001698 pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
Thomas Gleixner740b41f2014-04-28 14:34:11 -05001699
1700 /*
1701 * Cyclic is simple. Just subtract pset[0].addr from pos.
1702 *
1703 * We never update edesc->residue in the cyclic case, so we
1704	 * can report the remaining room to the end of the circular
1705 * buffer.
1706 */
1707 if (edesc->cyclic) {
1708 done = pos - pset->addr;
1709 edesc->residue_stat = edesc->residue - done;
1710 return edesc->residue_stat;
1711 }
1712
1713 /*
1714 * For SG operation we catch up with the last processed
1715 * status.
1716 */
1717 pset += edesc->processed_stat;
1718
1719 for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
1720 /*
1721 * If we are inside this pset address range, we know
1722 * this is the active one. Get the current delta and
1723 * stop walking the psets.
1724 */
1725 if (pos >= pset->addr && pos < pset->addr + pset->len)
1726 return edesc->residue_stat - (pos - pset->addr);
1727
1728 /* Otherwise mark it done and update residue_stat. */
1729 edesc->processed_stat++;
1730 edesc->residue_stat -= pset->len;
1731 }
1732 return edesc->residue_stat;
1733}
1734
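/*
 * Worked example for the SG walk above: with psets of 4096 bytes each
 * and pos currently 1024 bytes into pset[1], the loop marks pset[0]
 * done (residue_stat -= 4096, processed_stat++) and then returns
 * residue_stat - 1024 for the pset that is still in flight.
 */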
Matt Porterc2dde5f2012-08-22 21:09:34 -04001735/* Check request completion status */
1736static enum dma_status edma_tx_status(struct dma_chan *chan,
1737 dma_cookie_t cookie,
1738 struct dma_tx_state *txstate)
1739{
1740 struct edma_chan *echan = to_edma_chan(chan);
1741 struct virt_dma_desc *vdesc;
1742 enum dma_status ret;
1743 unsigned long flags;
1744
1745 ret = dma_cookie_status(chan, cookie, txstate);
Vinod Koul9d386ec2013-10-16 13:42:15 +05301746 if (ret == DMA_COMPLETE || !txstate)
Matt Porterc2dde5f2012-08-22 21:09:34 -04001747 return ret;
1748
1749 spin_lock_irqsave(&echan->vchan.lock, flags);
Thomas Gleixnerde135932014-04-28 14:19:51 -05001750 if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
Thomas Gleixner740b41f2014-04-28 14:34:11 -05001751 txstate->residue = edma_residue(echan->edesc);
Thomas Gleixnerde135932014-04-28 14:19:51 -05001752 else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
1753 txstate->residue = to_edma_desc(&vdesc->tx)->residue;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001754 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1755
1756 return ret;
1757}
1758
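/*
 * Client-side sketch of a residue query against this callback,
 * illustrative only:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		dev_info(dev, "%u bytes still pending\n", state.residue);
 */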
Peter Ujfalusi2c88ee62014-04-14 14:42:01 +03001759#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
1760 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
Peter Ujfalusie4a899d2014-07-03 07:51:56 +03001761 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
Peter Ujfalusi2c88ee62014-04-14 14:42:01 +03001762 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1763
Peter Ujfalusi02f77ef2015-10-16 10:18:05 +03001764static void edma_dma_init(struct edma_cc *ecc)
Matt Porterc2dde5f2012-08-22 21:09:34 -04001765{
Peter Ujfalusi02f77ef2015-10-16 10:18:05 +03001766 struct dma_device *ddev = &ecc->dma_slave;
1767 int i, j;
Maxime Ripard9f59cd02014-11-17 14:42:47 +01001768
Peter Ujfalusi02f77ef2015-10-16 10:18:05 +03001769 dma_cap_zero(ddev->cap_mask);
1770 dma_cap_set(DMA_SLAVE, ddev->cap_mask);
1771 dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
1772 dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
Maxime Ripard9f59cd02014-11-17 14:42:47 +01001773
Peter Ujfalusi02f77ef2015-10-16 10:18:05 +03001774 ddev->device_prep_slave_sg = edma_prep_slave_sg;
1775 ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
1776 ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1777 ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
1778 ddev->device_free_chan_resources = edma_free_chan_resources;
1779 ddev->device_issue_pending = edma_issue_pending;
1780 ddev->device_tx_status = edma_tx_status;
1781 ddev->device_config = edma_slave_config;
1782 ddev->device_pause = edma_dma_pause;
1783 ddev->device_resume = edma_dma_resume;
1784 ddev->device_terminate_all = edma_terminate_all;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001785
Peter Ujfalusi02f77ef2015-10-16 10:18:05 +03001786 ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1787 ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
1788 ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1789 ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1790
1791 ddev->dev = ecc->dev;
1792
1793 INIT_LIST_HEAD(&ddev->channels);
1794
1795 for (i = 0; i < ecc->num_channels; i++) {
1796 struct edma_chan *echan = &ecc->slave_chans[i];
1797 echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
1798 echan->ecc = ecc;
1799 echan->vchan.desc_free = edma_desc_free;
1800
1801 vchan_init(&echan->vchan, ddev);
1802
1803 INIT_LIST_HEAD(&echan->node);
1804 for (j = 0; j < EDMA_MAX_SLOTS; j++)
1805 echan->slot[j] = -1;
1806 }
Matt Porterc2dde5f2012-08-22 21:09:34 -04001807}
1808
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001809static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
1810 struct edma_cc *ecc)
1811{
1812 int i;
1813 u32 value, cccfg;
1814 s8 (*queue_priority_map)[2];
1815
1816 /* Decode the eDMA3 configuration from CCCFG register */
1817 cccfg = edma_read(ecc, EDMA_CCCFG);
1818
1819 value = GET_NUM_REGN(cccfg);
1820 ecc->num_region = BIT(value);
1821
1822 value = GET_NUM_DMACH(cccfg);
1823 ecc->num_channels = BIT(value + 1);
1824
Peter Ujfalusi633e42b2015-10-16 10:18:04 +03001825 value = GET_NUM_QDMACH(cccfg);
1826 ecc->num_qchannels = value * 2;
1827
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001828 value = GET_NUM_PAENTRY(cccfg);
1829 ecc->num_slots = BIT(value + 4);
1830
1831 value = GET_NUM_EVQUE(cccfg);
1832 ecc->num_tc = value + 1;
1833
Peter Ujfalusi4ab54f62015-10-14 14:43:04 +03001834 ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
1835
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001836 dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
1837 dev_dbg(dev, "num_region: %u\n", ecc->num_region);
1838 dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
Peter Ujfalusi633e42b2015-10-16 10:18:04 +03001839 dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001840 dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
1841 dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
Peter Ujfalusi4ab54f62015-10-14 14:43:04 +03001842 dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001843
1844	/* Nothing needs to be done if a queue priority mapping is provided */
1845 if (pdata->queue_priority_mapping)
1846 return 0;
1847
1848 /*
1849 * Configure TC/queue priority as follows:
1850 * Q0 - priority 0
1851 * Q1 - priority 1
1852 * Q2 - priority 2
1853 * ...
1854 * The meaning of priority numbers: 0 highest priority, 7 lowest
1855 * priority. So Q0 is the highest priority queue and the last queue has
1856 * the lowest priority.
1857 */
Peter Ujfalusi547c6e22015-10-14 14:42:55 +03001858 queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001859 GFP_KERNEL);
1860 if (!queue_priority_map)
1861 return -ENOMEM;
1862
1863 for (i = 0; i < ecc->num_tc; i++) {
1864 queue_priority_map[i][0] = i;
1865 queue_priority_map[i][1] = i;
1866 }
1867 queue_priority_map[i][0] = -1;
1868 queue_priority_map[i][1] = -1;
1869
1870 pdata->queue_priority_mapping = queue_priority_map;
1871 /* Default queue has the lowest priority */
1872 pdata->default_queue = i - 1;
1873
1874 return 0;
1875}
1876
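/*
 * Example CCCFG decode, using a hypothetical register value where
 * NUM_DMACH = 3 and NUM_PAENTRY = 3: the code above derives
 * num_channels = BIT(3 + 1) = 16 and num_slots = BIT(3 + 4) = 128.
 */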
1877#if IS_ENABLED(CONFIG_OF)
1878static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
1879 size_t sz)
1880{
1881 const char pname[] = "ti,edma-xbar-event-map";
1882 struct resource res;
1883 void __iomem *xbar;
1884 s16 (*xbar_chans)[2];
1885 size_t nelm = sz / sizeof(s16);
1886 u32 shift, offset, mux;
1887 int ret, i;
1888
Peter Ujfalusi547c6e22015-10-14 14:42:55 +03001889 xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001890 if (!xbar_chans)
1891 return -ENOMEM;
1892
1893 ret = of_address_to_resource(dev->of_node, 1, &res);
1894 if (ret)
1895 return -ENOMEM;
1896
1897 xbar = devm_ioremap(dev, res.start, resource_size(&res));
1898 if (!xbar)
1899 return -ENOMEM;
1900
1901 ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
1902 nelm);
1903 if (ret)
1904 return -EIO;
1905
1906 /* Invalidate last entry for the other user of this mess */
1907 nelm >>= 1;
1908 xbar_chans[nelm][0] = -1;
1909 xbar_chans[nelm][1] = -1;
1910
1911 for (i = 0; i < nelm; i++) {
1912 shift = (xbar_chans[i][1] & 0x03) << 3;
1913 offset = xbar_chans[i][1] & 0xfffffffc;
1914 mux = readl(xbar + offset);
1915 mux &= ~(0xff << shift);
1916 mux |= xbar_chans[i][0] << shift;
1917 writel(mux, (xbar + offset));
1918 }
1919
1920 pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
1921 return 0;
1922}
1923
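/*
 * Worked example for the mux write above: a map entry of {12, 9}
 * (event 12 routed to crossbar output 9) yields shift = (9 & 3) << 3 = 8
 * and offset = 9 & ~3 = 8, so byte 1 of the 32-bit register at
 * xbar + 8 is programmed with 12.
 */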
1924static int edma_of_parse_dt(struct device *dev, struct edma_soc_info *pdata)
1925{
1926 int ret = 0;
1927 struct property *prop;
1928 size_t sz;
1929 struct edma_rsv_info *rsv_info;
1930
1931 rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
1932 if (!rsv_info)
1933 return -ENOMEM;
1934 pdata->rsv = rsv_info;
1935
1936 prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz);
1937 if (prop)
1938 ret = edma_xbar_event_map(dev, pdata, sz);
1939
1940 return ret;
1941}
1942
1943static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
1944{
1945 struct edma_soc_info *info;
1946 int ret;
1947
1948 info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
1949 if (!info)
1950 return ERR_PTR(-ENOMEM);
1951
1952 ret = edma_of_parse_dt(dev, info);
1953 if (ret)
1954 return ERR_PTR(ret);
1955
1956 return info;
1957}
1958#else
1959static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
1960{
1961 return ERR_PTR(-EINVAL);
1962}
1963#endif
1964
Bill Pemberton463a1f82012-11-19 13:22:55 -05001965static int edma_probe(struct platform_device *pdev)
Matt Porterc2dde5f2012-08-22 21:09:34 -04001966{
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001967 struct edma_soc_info *info = pdev->dev.platform_data;
1968 s8 (*queue_priority_mapping)[2];
1969 int i, off, ln;
1970 const s16 (*rsv_chans)[2];
1971 const s16 (*rsv_slots)[2];
1972 const s16 (*xbar_chans)[2];
1973 int irq;
1974 char *irq_name;
1975 struct resource *mem;
1976 struct device_node *node = pdev->dev.of_node;
1977 struct device *dev = &pdev->dev;
1978 struct edma_cc *ecc;
Matt Porterc2dde5f2012-08-22 21:09:34 -04001979 int ret;
1980
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03001981 if (node) {
1982 info = edma_setup_info_from_dt(dev);
1983 if (IS_ERR(info)) {
1984 dev_err(dev, "failed to get DT data\n");
1985 return PTR_ERR(info);
1986 }
1987 }
1988
1989 if (!info)
1990 return -ENODEV;
1991
1992 pm_runtime_enable(dev);
1993 ret = pm_runtime_get_sync(dev);
1994 if (ret < 0) {
1995 dev_err(dev, "pm_runtime_get_sync() failed\n");
1996 return ret;
1997 }
1998
Peter Ujfalusi907f74a2015-10-14 14:42:56 +03001999 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
Russell King94cb0e72013-06-27 13:45:16 +01002000 if (ret)
2001 return ret;
2002
Peter Ujfalusi907f74a2015-10-14 14:42:56 +03002003 ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
Matt Porterc2dde5f2012-08-22 21:09:34 -04002004 if (!ecc) {
Peter Ujfalusi907f74a2015-10-14 14:42:56 +03002005 dev_err(dev, "Can't allocate controller\n");
Matt Porterc2dde5f2012-08-22 21:09:34 -04002006 return -ENOMEM;
2007 }
2008
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002009 ecc->dev = dev;
2010 ecc->id = pdev->id;
2011 /* When booting with DT the pdev->id is -1 */
2012 if (ecc->id < 0)
2013 ecc->id = 0;
Peter Ujfalusica304fa2015-10-14 14:42:49 +03002014
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002015 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
2016 if (!mem) {
2017 dev_dbg(dev, "mem resource not found, using index 0\n");
2018 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2019 if (!mem) {
2020 dev_err(dev, "no mem resource?\n");
2021 return -ENODEV;
2022 }
2023 }
2024 ecc->base = devm_ioremap_resource(dev, mem);
2025 if (IS_ERR(ecc->base))
2026 return PTR_ERR(ecc->base);
Peter Ujfalusib2c843a2015-10-14 14:42:50 +03002027
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002028 platform_set_drvdata(pdev, ecc);
2029
2030 /* Get eDMA3 configuration from IP */
2031 ret = edma_setup_from_hw(dev, info, ecc);
2032 if (ret)
2033 return ret;
2034
Peter Ujfalusicb782052015-10-14 14:42:54 +03002035 /* Allocate memory based on the information we got from the IP */
2036 ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
2037 sizeof(*ecc->slave_chans), GFP_KERNEL);
2038 if (!ecc->slave_chans)
2039 return -ENOMEM;
2040
Peter Ujfalusi7a73b132015-10-14 14:43:05 +03002041 ecc->channel_unused = devm_kcalloc(dev,
2042 BITS_TO_LONGS(ecc->num_channels),
2043 sizeof(unsigned long), GFP_KERNEL);
2044 if (!ecc->channel_unused)
Peter Ujfalusicb782052015-10-14 14:42:54 +03002045 return -ENOMEM;
2046
Peter Ujfalusi7a73b132015-10-14 14:43:05 +03002047 ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
Peter Ujfalusicb782052015-10-14 14:42:54 +03002048 sizeof(unsigned long), GFP_KERNEL);
Peter Ujfalusi7a73b132015-10-14 14:43:05 +03002049 if (!ecc->slot_inuse)
Peter Ujfalusicb782052015-10-14 14:42:54 +03002050 return -ENOMEM;
2051
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002052 ecc->default_queue = info->default_queue;
2053
2054 for (i = 0; i < ecc->num_slots; i++)
2055 edma_write_slot(ecc, i, &dummy_paramset);
2056
2057 /* Mark all channels as unused */
Peter Ujfalusi7a73b132015-10-14 14:43:05 +03002058	memset(ecc->channel_unused, 0xff, BITS_TO_LONGS(ecc->num_channels) * sizeof(unsigned long));
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002059
2060 if (info->rsv) {
2061 /* Clear the reserved channels in unused list */
2062 rsv_chans = info->rsv->rsv_chans;
2063 if (rsv_chans) {
2064 for (i = 0; rsv_chans[i][0] != -1; i++) {
2065 off = rsv_chans[i][0];
2066 ln = rsv_chans[i][1];
Peter Ujfalusi7a73b132015-10-14 14:43:05 +03002067 clear_bits(off, ln, ecc->channel_unused);
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002068 }
2069 }
2070
2071 /* Set the reserved slots in inuse list */
2072 rsv_slots = info->rsv->rsv_slots;
2073 if (rsv_slots) {
2074 for (i = 0; rsv_slots[i][0] != -1; i++) {
2075 off = rsv_slots[i][0];
2076 ln = rsv_slots[i][1];
Peter Ujfalusi7a73b132015-10-14 14:43:05 +03002077 set_bits(off, ln, ecc->slot_inuse);
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002078 }
2079 }
2080 }
2081
2082 /* Clear the xbar mapped channels in unused list */
2083 xbar_chans = info->xbar_chans;
2084 if (xbar_chans) {
2085 for (i = 0; xbar_chans[i][1] != -1; i++) {
2086 off = xbar_chans[i][1];
Peter Ujfalusi7a73b132015-10-14 14:43:05 +03002087 clear_bits(off, 1, ecc->channel_unused);
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002088 }
2089 }
2090
2091 irq = platform_get_irq_byname(pdev, "edma3_ccint");
2092 if (irq < 0 && node)
2093 irq = irq_of_parse_and_map(node, 0);
2094
2095 if (irq >= 0) {
2096 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
2097 dev_name(dev));
2098 ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
2099 ecc);
2100 if (ret) {
2101 dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
2102 return ret;
2103 }
2104 }
2105
2106 irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
2107 if (irq < 0 && node)
2108 irq = irq_of_parse_and_map(node, 2);
2109
2110 if (irq >= 0) {
2111 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
2112 dev_name(dev));
2113 ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
2114 ecc);
2115 if (ret) {
2116 dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
2117 return ret;
2118 }
2119 }
2120
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03002121 ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
2122 if (ecc->dummy_slot < 0) {
2123 dev_err(dev, "Can't allocate PaRAM dummy slot\n");
2124 return ecc->dummy_slot;
2125 }
2126
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002127 queue_priority_mapping = info->queue_priority_mapping;
2128
2129 /* Event queue priority mapping */
2130 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2131 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2132 queue_priority_mapping[i][1]);
2133
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002134 for (i = 0; i < ecc->num_region; i++) {
2135 edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
2136 edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
2137 edma_write_array(ecc, EDMA_QRAE, i, 0x0);
2138 }
2139 ecc->info = info;
2140
Peter Ujfalusi02f77ef2015-10-16 10:18:05 +03002141 /* Init the dma device and channels */
2142 edma_dma_init(ecc);
Matt Porterc2dde5f2012-08-22 21:09:34 -04002143
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03002144 for (i = 0; i < ecc->num_channels; i++) {
2145 /* Assign all channels to the default queue */
Peter Ujfalusif9425de2015-10-16 10:18:03 +03002146 edma_assign_channel_eventq(&ecc->slave_chans[i],
2147 info->default_queue);
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03002148 /* Set entry slot to the dummy slot */
2149 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
2150 }
2151
Matt Porterc2dde5f2012-08-22 21:09:34 -04002152 ret = dma_async_device_register(&ecc->dma_slave);
2153 if (ret)
2154 goto err_reg1;
2155
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002156 if (node)
2157 of_dma_controller_register(node, of_dma_xlate_by_chan_id,
Peter Ujfalusib2c843a2015-10-14 14:42:50 +03002158 &ecc->dma_slave);
Peter Ujfalusidc9b60552015-10-14 14:42:47 +03002159
Peter Ujfalusi907f74a2015-10-14 14:42:56 +03002160 dev_info(dev, "TI EDMA DMA engine driver\n");
Matt Porterc2dde5f2012-08-22 21:09:34 -04002161
2162 return 0;
2163
2164err_reg1:
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002165 edma_free_slot(ecc, ecc->dummy_slot);
Matt Porterc2dde5f2012-08-22 21:09:34 -04002166 return ret;
2167}
2168
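/*
 * A sketch of a device tree node this probe routine can bind against,
 * based on the resource name "edma3_cc" and the interrupt indices used
 * above; the addresses and interrupt numbers are placeholders, not
 * taken from any particular SoC:
 *
 *	edma: edma@49000000 {
 *		compatible = "ti,edma3";
 *		reg = <0x49000000 0x10000>;
 *		interrupts = <12 13 14>;
 *		#dma-cells = <1>;
 *	};
 */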
Greg Kroah-Hartman4bf27b82012-12-21 15:09:59 -08002169static int edma_remove(struct platform_device *pdev)
Matt Porterc2dde5f2012-08-22 21:09:34 -04002170{
2171 struct device *dev = &pdev->dev;
2172 struct edma_cc *ecc = dev_get_drvdata(dev);
2173
Peter Ujfalusi907f74a2015-10-14 14:42:56 +03002174 if (dev->of_node)
2175 of_dma_controller_free(dev->of_node);
Matt Porterc2dde5f2012-08-22 21:09:34 -04002176 dma_async_device_unregister(&ecc->dma_slave);
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002177 edma_free_slot(ecc, ecc->dummy_slot);
Matt Porterc2dde5f2012-08-22 21:09:34 -04002178
2179 return 0;
2180}
2181
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002182#ifdef CONFIG_PM_SLEEP
2183static int edma_pm_resume(struct device *dev)
2184{
2185 struct edma_cc *ecc = dev_get_drvdata(dev);
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03002186 struct edma_chan *echan = ecc->slave_chans;
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002187 int i;
2188 s8 (*queue_priority_mapping)[2];
2189
2190 queue_priority_mapping = ecc->info->queue_priority_mapping;
2191
2192 /* Event queue priority mapping */
2193 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2194 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2195 queue_priority_mapping[i][1]);
2196
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002197 for (i = 0; i < ecc->num_channels; i++) {
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03002198 if (echan[i].alloced) {
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002199 /* ensure access through shadow region 0 */
2200 edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
2201 BIT(i & 0x1f));
2202
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03002203 edma_setup_interrupt(&echan[i], true);
Peter Ujfalusie4e886c2015-10-14 14:43:06 +03002204
2205 /* Set up channel -> slot mapping for the entry slot */
Peter Ujfalusi34cf3012015-10-16 10:18:01 +03002206 edma_set_chmap(&echan[i], echan[i].slot[0]);
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002207 }
2208 }
2209
2210 return 0;
2211}
2212#endif
2213
2214static const struct dev_pm_ops edma_pm_ops = {
2215 SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
2216};
2217
Matt Porterc2dde5f2012-08-22 21:09:34 -04002218static struct platform_driver edma_driver = {
2219 .probe = edma_probe,
Bill Pembertona7d6e3e2012-11-19 13:20:04 -05002220 .remove = edma_remove,
Matt Porterc2dde5f2012-08-22 21:09:34 -04002221 .driver = {
Peter Ujfalusi2b6b3b72015-10-14 14:42:53 +03002222 .name = "edma",
2223 .pm = &edma_pm_ops,
2224 .of_match_table = edma_of_ids,
Matt Porterc2dde5f2012-08-22 21:09:34 -04002225 },
2226};
2227
2228bool edma_filter_fn(struct dma_chan *chan, void *param)
2229{
2230 if (chan->device->dev->driver == &edma_driver.driver) {
2231 struct edma_chan *echan = to_edma_chan(chan);
2232 unsigned ch_req = *(unsigned *)param;
2233 return ch_req == echan->ch_num;
2234 }
2235 return false;
2236}
2237EXPORT_SYMBOL(edma_filter_fn);
2238
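/*
 * Typical legacy (non-DT) usage of the exported filter, illustrative
 * only; the channel number is a placeholder:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch_num = EDMA_CTLR_CHAN(0, 20);
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */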
Matt Porterc2dde5f2012-08-22 21:09:34 -04002239static int edma_init(void)
2240{
Arnd Bergmann5305e4d2014-10-24 18:14:01 +02002241 return platform_driver_register(&edma_driver);
Matt Porterc2dde5f2012-08-22 21:09:34 -04002242}
2243subsys_initcall(edma_init);
2244
2245static void __exit edma_exit(void)
2246{
Matt Porterc2dde5f2012-08-22 21:09:34 -04002247 platform_driver_unregister(&edma_driver);
2248}
2249module_exit(edma_exit);
2250
Josh Boyerd71505b2013-09-04 10:32:50 -04002251MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
Matt Porterc2dde5f2012-08-22 21:09:34 -04002252MODULE_DESCRIPTION("TI EDMA DMA engine driver");
2253MODULE_LICENSE("GPL v2");