/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
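
/*
 * Worked example (illustrative only, not used by the code): with a CCR whose
 * burst-size field is 2 (1 << 2 = 4 bytes per beat) and whose burst-length
 * field is 15 (15 + 1 = 16 beats per burst), one burst moves 64 bytes, so
 * BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 16 = 64 bursts and
 * BURST_TO_BYTE(64, ccr) = 64 * 4 * 16 = 4096 bytes.
 */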

/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For a typical scenario, at 1 word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */

#define NR_DEFAULT_DESC	16

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/**
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct dma_pl330_desc;

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct dma_pl330_desc *desc;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};

static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}
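
/*
 * Illustrative encoding (a sketch, not executed anywhere): _emit_MOV(0, buf,
 * CCR, 0x14005) writes the 6-byte instruction 0xbc 0x01 0x05 0x40 0x01 0x00,
 * i.e. DMAMOV CCR followed by the 32-bit immediate. The immediate is stored
 * with a plain u32 write, so its byte order follows the CPU's endianness
 * (little-endian assumed above).
 */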

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req)
		return true;

	desc = req->desc;

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	}

	return off;
}

static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	}

	return off;
}

static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_DEV_TO_MEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
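
/*
 * Worked example (illustrative only): for *bursts == 1000 the code above
 * picks lcnt1 = 256, lcnt0 = 1000 / 256 = 3 and cyc = 1, so this call emits
 * two nested DMALP/DMALPEND loops around a single burst and reports
 * *bursts = 256 * 1 * 3 = 768 bursts consumed; _setup_loops() then calls
 * _loop() again for the remaining 232 bursts.
 */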

static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}

static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = &pxs->desc->px;
	/* Error if xfer length is not aligned at burst size */
	if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
		return -EINVAL;

	off += _setup_xfer(dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
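
/*
 * For reference (a sketch, not emitted verbatim): a single-xfer request thus
 * becomes a small microcode program of the form
 *	DMAMOV CCR, ccr; DMAMOV SAR, src; DMAMOV DAR, dst;
 *	DMALP ... <bursts> ... DMALPEND; DMASEV ev; DMAEND
 * with the load/store flavour inside the loop chosen by _bursts() from the
 * transfer direction.
 */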

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
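
/*
 * Worked example (illustrative only): src_inc = dst_inc = 1, brst_len = 1,
 * brst_size = 2 (4-byte beats) and everything else zero gives
 * ccr = CC_SRCINC | CC_DSTINC | (2 << CC_SRCBRSTSIZE_SHFT) |
 *	 (2 << CC_DSTBRSTSIZE_SHFT) = 0x00014005.
 */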

/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
	struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!desc || !thrd || thrd->free)
		return -EINVAL;

	regs = thrd->dmac->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
			"%s:%d Invalid peripheral(%u)!\n",
			__func__, __LINE__, desc->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	xs.ccr = ccr;
	xs.desc = desc;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz\n",
			__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].desc = desc;
	_setup_req(0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone, *tmp;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	unsigned long flags;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
}

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001822		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
Boojin Kimb7d861d2011-12-26 18:49:52 +09001823 __func__, __LINE__);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001824 dma_free_coherent(pl330->ddma.dev,
1825 chans * pl330->mcbufsz,
Boojin Kimb7d861d2011-12-26 18:49:52 +09001826 pl330->mcode_cpu, pl330->mcode_bus);
1827 return ret;
1828 }
1829
1830 return 0;
1831}
1832
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001833static int pl330_add(struct pl330_dmac *pl330)
Boojin Kimb7d861d2011-12-26 18:49:52 +09001834{
Boojin Kimb7d861d2011-12-26 18:49:52 +09001835 void __iomem *regs;
1836 int i, ret;
1837
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001838 regs = pl330->base;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001839
1840 /* Check if we can handle this DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001841 if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
1842 		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x!\n",
1843 pl330->pcfg.periph_id);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001844 return -EINVAL;
1845 }
1846
1847 /* Read the configuration of the DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001848 read_dmac_config(pl330);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001849
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001850 if (pl330->pcfg.num_events == 0) {
1851 dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
Boojin Kimb7d861d2011-12-26 18:49:52 +09001852 __func__, __LINE__);
1853 return -EINVAL;
1854 }
1855
Boojin Kimb7d861d2011-12-26 18:49:52 +09001856 spin_lock_init(&pl330->lock);
1857
1858 INIT_LIST_HEAD(&pl330->req_done);
1859
1860 /* Use default MC buffer size if not provided */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001861 if (!pl330->mcbufsz)
1862 pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001863
1864 /* Mark all events as free */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001865 for (i = 0; i < pl330->pcfg.num_events; i++)
Boojin Kimb7d861d2011-12-26 18:49:52 +09001866 pl330->events[i] = -1;
1867
1868 /* Allocate resources needed by the DMAC */
1869 ret = dmac_alloc_resources(pl330);
1870 if (ret) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001871 dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
Boojin Kimb7d861d2011-12-26 18:49:52 +09001872 return ret;
1873 }
1874
1875 tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
1876
1877 pl330->state = INIT;
1878
1879 return 0;
1880}
1881
1882static int dmac_free_threads(struct pl330_dmac *pl330)
1883{
Boojin Kimb7d861d2011-12-26 18:49:52 +09001884 struct pl330_thread *thrd;
1885 int i;
1886
1887 /* Release Channel threads */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001888 for (i = 0; i < pl330->pcfg.num_chan; i++) {
Boojin Kimb7d861d2011-12-26 18:49:52 +09001889 thrd = &pl330->channels[i];
Lars-Peter Clausen65ad6062014-07-06 20:32:26 +02001890 pl330_release_channel(thrd);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001891 }
1892
1893 /* Free memory */
1894 kfree(pl330->channels);
1895
1896 return 0;
1897}
1898
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001899static void pl330_del(struct pl330_dmac *pl330)
Boojin Kimb7d861d2011-12-26 18:49:52 +09001900{
Boojin Kimb7d861d2011-12-26 18:49:52 +09001901 pl330->state = UNINIT;
1902
1903 tasklet_kill(&pl330->tasks);
1904
1905 /* Free DMAC resources */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001906 dmac_free_threads(pl330);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001907
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001908 dma_free_coherent(pl330->ddma.dev,
1909 pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
1910 pl330->mcode_bus);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001911}
1912
Thomas Abraham3e2ec132011-10-24 11:43:02 +02001913/* forward declaration */
1914static struct amba_driver pl330_driver;
1915
Jassi Brarb3040e42010-05-23 20:28:19 -07001916static inline struct dma_pl330_chan *
1917to_pchan(struct dma_chan *ch)
1918{
1919 if (!ch)
1920 return NULL;
1921
1922 return container_of(ch, struct dma_pl330_chan, chan);
1923}
1924
1925static inline struct dma_pl330_desc *
1926to_desc(struct dma_async_tx_descriptor *tx)
1927{
1928 return container_of(tx, struct dma_pl330_desc, txd);
1929}
1930
Jassi Brarb3040e42010-05-23 20:28:19 -07001931static inline void fill_queue(struct dma_pl330_chan *pch)
1932{
1933 struct dma_pl330_desc *desc;
1934 int ret;
1935
1936 list_for_each_entry(desc, &pch->work_list, node) {
1937
1938 /* If already submitted */
1939 if (desc->status == BUSY)
Jassi Brar30fb9802013-02-13 16:13:14 +05301940 continue;
Jassi Brarb3040e42010-05-23 20:28:19 -07001941
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02001942 ret = pl330_submit_req(pch->thread, desc);
Jassi Brarb3040e42010-05-23 20:28:19 -07001943 if (!ret) {
1944 desc->status = BUSY;
Jassi Brarb3040e42010-05-23 20:28:19 -07001945 } else if (ret == -EAGAIN) {
1946 /* QFull or DMAC Dying */
1947 break;
1948 } else {
1949 /* Unacceptable request */
1950 desc->status = DONE;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001951 dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
Jassi Brarb3040e42010-05-23 20:28:19 -07001952 __func__, __LINE__, desc->txd.cookie);
1953 tasklet_schedule(&pch->task);
1954 }
1955 }
1956}
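/*
 * Queueing model sketch, inferred from the two request slots set up in
 * _reset_thread(): pl330_submit_req() is expected to return -EAGAIN once
 * both req[0] and req[1] of the thread are occupied, so fill_queue() simply
 * stops and leaves the remaining descriptors on work_list until the tasklet
 * runs again after a completion.
 */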
1957
1958static void pl330_tasklet(unsigned long data)
1959{
1960 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
1961 struct dma_pl330_desc *desc, *_dt;
1962 unsigned long flags;
Jassi Brarb3040e42010-05-23 20:28:19 -07001963
1964 spin_lock_irqsave(&pch->lock, flags);
1965
1966 	/* Pick up ripe tomatoes, i.e. descriptors whose transfer has completed */
1967 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
1968 if (desc->status == DONE) {
Tushar Behera30c1dc02012-05-23 16:47:31 +05301969 if (!pch->cyclic)
Vinod Kouleab21582012-05-11 11:24:41 +05301970 dma_cookie_complete(&desc->txd);
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02001971 list_move_tail(&desc->node, &pch->completed_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07001972 }
1973
1974 	/* Try to submit a req immediately next to the last completed cookie */
1975 fill_queue(pch);
1976
1977 /* Make sure the PL330 Channel thread is active */
Lars-Peter Clausenc26939e2014-07-06 20:32:32 +02001978 spin_lock(&pch->thread->dmac->lock);
1979 _start(pch->thread);
1980 spin_unlock(&pch->thread->dmac->lock);
Jassi Brarb3040e42010-05-23 20:28:19 -07001981
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02001982 while (!list_empty(&pch->completed_list)) {
1983 dma_async_tx_callback callback;
1984 void *callback_param;
Jassi Brarb3040e42010-05-23 20:28:19 -07001985
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02001986 desc = list_first_entry(&pch->completed_list,
1987 struct dma_pl330_desc, node);
1988
1989 callback = desc->txd.callback;
1990 callback_param = desc->txd.callback_param;
1991
1992 if (pch->cyclic) {
1993 desc->status = PREP;
1994 list_move_tail(&desc->node, &pch->work_list);
1995 } else {
1996 desc->status = FREE;
1997 list_move_tail(&desc->node, &pch->dmac->desc_pool);
1998 }
1999
Dan Williamsd38a8c62013-10-18 19:35:23 +02002000 dma_descriptor_unmap(&desc->txd);
2001
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002002 if (callback) {
2003 spin_unlock_irqrestore(&pch->lock, flags);
2004 callback(callback_param);
2005 spin_lock_irqsave(&pch->lock, flags);
2006 }
2007 }
2008 spin_unlock_irqrestore(&pch->lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002009}
2010
Thomas Abraham3e2ec132011-10-24 11:43:02 +02002011bool pl330_filter(struct dma_chan *chan, void *param)
2012{
Thomas Abrahamcd072512011-10-24 11:43:11 +02002013 u8 *peri_id;
Thomas Abraham3e2ec132011-10-24 11:43:02 +02002014
2015 if (chan->device->dev->driver != &pl330_driver.drv)
2016 return false;
2017
Thomas Abrahamcd072512011-10-24 11:43:11 +02002018 peri_id = chan->private;
Dan Carpenter2f986ec2013-11-08 12:51:16 +03002019 return *peri_id == (unsigned long)param;
Thomas Abraham3e2ec132011-10-24 11:43:02 +02002020}
2021EXPORT_SYMBOL(pl330_filter);
2022
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302023static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
2024 struct of_dma *ofdma)
2025{
2026 int count = dma_spec->args_count;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002027 struct pl330_dmac *pl330 = ofdma->of_dma_data;
Lars-Peter Clausen70cbb162014-01-11 20:08:39 +01002028 unsigned int chan_id;
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302029
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002030 if (!pl330)
2031 return NULL;
2032
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302033 if (count != 1)
2034 return NULL;
2035
Lars-Peter Clausen70cbb162014-01-11 20:08:39 +01002036 chan_id = dma_spec->args[0];
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002037 if (chan_id >= pl330->num_peripherals)
Lars-Peter Clausen70cbb162014-01-11 20:08:39 +01002038 return NULL;
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302039
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002040 return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302041}
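/*
 * Illustrative device-tree sketch (addresses and node names are made up):
 * the xlate callback above expects exactly one cell, the peripheral/channel
 * id, so a typical binding looks like:
 *
 *	pdma0: dma-controller@12680000 {
 *		compatible = "arm,pl330", "arm,primecell";
 *		#dma-cells = <1>;
 *		...
 *	};
 *
 *	client@12c00000 {
 *		dmas = <&pdma0 13>, <&pdma0 12>;
 *		dma-names = "tx", "rx";
 *	};
 */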
2042
Jassi Brarb3040e42010-05-23 20:28:19 -07002043static int pl330_alloc_chan_resources(struct dma_chan *chan)
2044{
2045 struct dma_pl330_chan *pch = to_pchan(chan);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002046 struct pl330_dmac *pl330 = pch->dmac;
Jassi Brarb3040e42010-05-23 20:28:19 -07002047 unsigned long flags;
2048
2049 spin_lock_irqsave(&pch->lock, flags);
2050
Russell King - ARM Linuxd3ee98cdc2012-03-06 22:35:47 +00002051 dma_cookie_init(chan);
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002052 pch->cyclic = false;
Jassi Brarb3040e42010-05-23 20:28:19 -07002053
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002054 pch->thread = pl330_request_channel(pl330);
Lars-Peter Clausen65ad6062014-07-06 20:32:26 +02002055 if (!pch->thread) {
Jassi Brarb3040e42010-05-23 20:28:19 -07002056 spin_unlock_irqrestore(&pch->lock, flags);
Inderpal Singh02747882012-09-17 09:57:45 +05302057 return -ENOMEM;
Jassi Brarb3040e42010-05-23 20:28:19 -07002058 }
2059
2060 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
2061
2062 spin_unlock_irqrestore(&pch->lock, flags);
2063
2064 return 1;
2065}
2066
2067static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
2068{
2069 struct dma_pl330_chan *pch = to_pchan(chan);
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002070 struct dma_pl330_desc *desc;
Jassi Brarb3040e42010-05-23 20:28:19 -07002071 unsigned long flags;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002072 struct pl330_dmac *pl330 = pch->dmac;
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002073 struct dma_slave_config *slave_config;
Boojin Kimae43b882011-09-02 09:44:32 +09002074 LIST_HEAD(list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002075
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002076 switch (cmd) {
2077 case DMA_TERMINATE_ALL:
2078 spin_lock_irqsave(&pch->lock, flags);
2079
Lars-Peter Clausenc26939e2014-07-06 20:32:32 +02002080 spin_lock(&pl330->lock);
2081 _stop(pch->thread);
2082 spin_unlock(&pl330->lock);
2083
2084 pch->thread->req[0].desc = NULL;
2085 pch->thread->req[1].desc = NULL;
2086 pch->thread->req_running = -1;
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002087
2088 /* Mark all desc done */
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002089 list_for_each_entry(desc, &pch->submitted_list, node) {
2090 desc->status = FREE;
2091 dma_cookie_complete(&desc->txd);
2092 }
2093
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002094 list_for_each_entry(desc, &pch->work_list , node) {
2095 desc->status = FREE;
2096 dma_cookie_complete(&desc->txd);
Boojin Kimae43b882011-09-02 09:44:32 +09002097 }
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002098
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002099 list_for_each_entry(desc, &pch->completed_list , node) {
2100 desc->status = FREE;
2101 dma_cookie_complete(&desc->txd);
2102 }
2103
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002104 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
2105 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2106 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002107 spin_unlock_irqrestore(&pch->lock, flags);
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002108 break;
2109 case DMA_SLAVE_CONFIG:
2110 slave_config = (struct dma_slave_config *)arg;
2111
Vinod Kouldb8196d2011-10-13 22:34:23 +05302112 if (slave_config->direction == DMA_MEM_TO_DEV) {
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002113 if (slave_config->dst_addr)
2114 pch->fifo_addr = slave_config->dst_addr;
2115 if (slave_config->dst_addr_width)
2116 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2117 if (slave_config->dst_maxburst)
2118 pch->burst_len = slave_config->dst_maxburst;
Vinod Kouldb8196d2011-10-13 22:34:23 +05302119 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002120 if (slave_config->src_addr)
2121 pch->fifo_addr = slave_config->src_addr;
2122 if (slave_config->src_addr_width)
2123 pch->burst_sz = __ffs(slave_config->src_addr_width);
2124 if (slave_config->src_maxburst)
2125 pch->burst_len = slave_config->src_maxburst;
2126 }
2127 break;
2128 default:
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002129		dev_err(pch->dmac->ddma.dev, "Unsupported command\n");
Jassi Brarb3040e42010-05-23 20:28:19 -07002130 return -ENXIO;
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002131 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002132
2133 return 0;
2134}
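/*
 * Illustrative client-side sketch (assumption, fifo_phys is hypothetical):
 * peripheral drivers normally reach the DMA_SLAVE_CONFIG case above through
 * dmaengine_slave_config():
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * which stores pch->fifo_addr, pch->burst_sz (__ffs(4) = 2, i.e. 4-byte
 * beats) and pch->burst_len = 8 for later slave transfers.
 */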
2135
2136static void pl330_free_chan_resources(struct dma_chan *chan)
2137{
2138 struct dma_pl330_chan *pch = to_pchan(chan);
2139 unsigned long flags;
2140
Jassi Brarb3040e42010-05-23 20:28:19 -07002141 tasklet_kill(&pch->task);
2142
Bartlomiej Zolnierkiewiczda331ba2013-07-03 15:00:43 -07002143 spin_lock_irqsave(&pch->lock, flags);
2144
Lars-Peter Clausen65ad6062014-07-06 20:32:26 +02002145 pl330_release_channel(pch->thread);
2146 pch->thread = NULL;
Jassi Brarb3040e42010-05-23 20:28:19 -07002147
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002148 if (pch->cyclic)
2149 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2150
Jassi Brarb3040e42010-05-23 20:28:19 -07002151 spin_unlock_irqrestore(&pch->lock, flags);
2152}
2153
2154static enum dma_status
2155pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2156 struct dma_tx_state *txstate)
2157{
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +00002158 return dma_cookie_status(chan, cookie, txstate);
Jassi Brarb3040e42010-05-23 20:28:19 -07002159}
2160
2161static void pl330_issue_pending(struct dma_chan *chan)
2162{
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002163 struct dma_pl330_chan *pch = to_pchan(chan);
2164 unsigned long flags;
2165
2166 spin_lock_irqsave(&pch->lock, flags);
2167 list_splice_tail_init(&pch->submitted_list, &pch->work_list);
2168 spin_unlock_irqrestore(&pch->lock, flags);
2169
2170 pl330_tasklet((unsigned long)pch);
Jassi Brarb3040e42010-05-23 20:28:19 -07002171}
2172
2173/*
2174 * We returned the last one of the circular list of descriptor(s)
2175 * from prep_xxx, so the argument to submit corresponds to the last
2176 * descriptor of the list.
2177 */
2178static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2179{
2180 struct dma_pl330_desc *desc, *last = to_desc(tx);
2181 struct dma_pl330_chan *pch = to_pchan(tx->chan);
2182 dma_cookie_t cookie;
2183 unsigned long flags;
2184
2185 spin_lock_irqsave(&pch->lock, flags);
2186
2187 /* Assign cookies to all nodes */
Jassi Brarb3040e42010-05-23 20:28:19 -07002188 while (!list_empty(&last->node)) {
2189 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002190 if (pch->cyclic) {
2191 desc->txd.callback = last->txd.callback;
2192 desc->txd.callback_param = last->txd.callback_param;
2193 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002194
Russell King - ARM Linux884485e2012-03-06 22:34:46 +00002195 dma_cookie_assign(&desc->txd);
Jassi Brarb3040e42010-05-23 20:28:19 -07002196
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002197 list_move_tail(&desc->node, &pch->submitted_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002198 }
2199
Russell King - ARM Linux884485e2012-03-06 22:34:46 +00002200 cookie = dma_cookie_assign(&last->txd);
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002201 list_add_tail(&last->node, &pch->submitted_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002202 spin_unlock_irqrestore(&pch->lock, flags);
2203
2204 return cookie;
2205}
2206
2207static inline void _init_desc(struct dma_pl330_desc *desc)
2208{
Jassi Brarb3040e42010-05-23 20:28:19 -07002209 desc->rqcfg.swap = SWAP_NO;
Lars-Peter Clausenf0564c72014-07-06 20:32:19 +02002210 desc->rqcfg.scctl = CCTRL0;
2211 desc->rqcfg.dcctl = CCTRL0;
Jassi Brarb3040e42010-05-23 20:28:19 -07002212 desc->txd.tx_submit = pl330_tx_submit;
2213
2214 INIT_LIST_HEAD(&desc->node);
2215}
2216
2217/* Returns the number of descriptors added to the DMAC pool */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002218static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
Jassi Brarb3040e42010-05-23 20:28:19 -07002219{
2220 struct dma_pl330_desc *desc;
2221 unsigned long flags;
2222 int i;
2223
Will Deacon0baf8f62013-12-02 18:01:30 +00002224 desc = kcalloc(count, sizeof(*desc), flg);
Jassi Brarb3040e42010-05-23 20:28:19 -07002225 if (!desc)
2226 return 0;
2227
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002228 spin_lock_irqsave(&pl330->pool_lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002229
2230 for (i = 0; i < count; i++) {
2231 _init_desc(&desc[i]);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002232 list_add_tail(&desc[i].node, &pl330->desc_pool);
Jassi Brarb3040e42010-05-23 20:28:19 -07002233 }
2234
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002235 spin_unlock_irqrestore(&pl330->pool_lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002236
2237 return count;
2238}
2239
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002240static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
Jassi Brarb3040e42010-05-23 20:28:19 -07002241{
2242 struct dma_pl330_desc *desc = NULL;
2243 unsigned long flags;
2244
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002245 spin_lock_irqsave(&pl330->pool_lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002246
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002247 if (!list_empty(&pl330->desc_pool)) {
2248 desc = list_entry(pl330->desc_pool.next,
Jassi Brarb3040e42010-05-23 20:28:19 -07002249 struct dma_pl330_desc, node);
2250
2251 list_del_init(&desc->node);
2252
2253 desc->status = PREP;
2254 desc->txd.callback = NULL;
2255 }
2256
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002257 spin_unlock_irqrestore(&pl330->pool_lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002258
2259 return desc;
2260}
2261
2262static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
2263{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002264 struct pl330_dmac *pl330 = pch->dmac;
Thomas Abrahamcd072512011-10-24 11:43:11 +02002265 u8 *peri_id = pch->chan.private;
Jassi Brarb3040e42010-05-23 20:28:19 -07002266 struct dma_pl330_desc *desc;
2267
2268 	/* Pluck one desc from the DMAC's pool */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002269 desc = pluck_desc(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07002270
2271 /* If the DMAC pool is empty, alloc new */
2272 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002273 if (!add_desc(pl330, GFP_ATOMIC, 1))
Jassi Brarb3040e42010-05-23 20:28:19 -07002274 return NULL;
2275
2276 /* Try again */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002277 desc = pluck_desc(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07002278 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002279 dev_err(pch->dmac->ddma.dev,
Jassi Brarb3040e42010-05-23 20:28:19 -07002280 "%s:%d ALERT!\n", __func__, __LINE__);
2281 return NULL;
2282 }
2283 }
2284
2285 /* Initialize the descriptor */
2286 desc->pchan = pch;
2287 desc->txd.cookie = 0;
2288 async_tx_ack(&desc->txd);
2289
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02002290 desc->peri = peri_id ? pch->chan.chan_id : 0;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002291 desc->rqcfg.pcfg = &pch->dmac->pcfg;
Jassi Brarb3040e42010-05-23 20:28:19 -07002292
2293 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
2294
2295 return desc;
2296}
2297
2298static inline void fill_px(struct pl330_xfer *px,
2299 dma_addr_t dst, dma_addr_t src, size_t len)
2300{
Jassi Brarb3040e42010-05-23 20:28:19 -07002301 px->bytes = len;
2302 px->dst_addr = dst;
2303 px->src_addr = src;
2304}
2305
2306static struct dma_pl330_desc *
2307__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
2308 dma_addr_t src, size_t len)
2309{
2310 struct dma_pl330_desc *desc = pl330_get_desc(pch);
2311
2312 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002313 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
Jassi Brarb3040e42010-05-23 20:28:19 -07002314 __func__, __LINE__);
2315 return NULL;
2316 }
2317
2318 /*
2319 	 * Ideally we should look out for reqs bigger than
2320 * those that can be programmed with 256 bytes of
2321 * MC buffer, but considering a req size is seldom
2322 * going to be word-unaligned and more than 200MB,
2323 * we take it easy.
2324 	 * Also, should the limit be reached we'd rather
2325 	 * have the platform increase MC buffer size than
2326 	 * complicate this API driver.
2327 */
2328 fill_px(&desc->px, dst, src, len);
2329
2330 return desc;
2331}
2332
2333/* Call after fixing burst size */
2334static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
2335{
2336 struct dma_pl330_chan *pch = desc->pchan;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002337 struct pl330_dmac *pl330 = pch->dmac;
Jassi Brarb3040e42010-05-23 20:28:19 -07002338 int burst_len;
2339
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002340 burst_len = pl330->pcfg.data_bus_width / 8;
2341 burst_len *= pl330->pcfg.data_buf_dep;
Jassi Brarb3040e42010-05-23 20:28:19 -07002342 burst_len >>= desc->rqcfg.brst_size;
2343
2344 /* src/dst_burst_len can't be more than 16 */
2345 if (burst_len > 16)
2346 burst_len = 16;
2347
2348 while (burst_len > 1) {
2349 if (!(len % (burst_len << desc->rqcfg.brst_size)))
2350 break;
2351 burst_len--;
2352 }
2353
2354 return burst_len;
2355}
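/*
 * Worked example (illustrative): with a 64-bit data bus and a 16-line FIFO,
 * burst_len starts at (64 / 8) * 16 = 128 bytes of buffering; for
 * brst_size = 2 (4-byte beats) that becomes 128 >> 2 = 32 beats, capped at
 * the 16-beat hardware limit.  A 4096-byte request is divisible by
 * 16 << 2 = 64, so 16 is returned; otherwise burst_len is decremented until
 * the length divides evenly (worst case 1).
 */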
2356
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002357static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2358 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
Alexandre Bounine185ecb52012-03-08 15:35:13 -05002359 size_t period_len, enum dma_transfer_direction direction,
Peter Ujfalusiec8b5e42012-09-14 15:05:47 +03002360 unsigned long flags, void *context)
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002361{
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002362 struct dma_pl330_desc *desc = NULL, *first = NULL;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002363 struct dma_pl330_chan *pch = to_pchan(chan);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002364 struct pl330_dmac *pl330 = pch->dmac;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002365 unsigned int i;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002366 dma_addr_t dst;
2367 dma_addr_t src;
2368
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002369 if (len % period_len != 0)
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002370 return NULL;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002371
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002372 if (!is_slave_direction(direction)) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002373 dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002374 __func__, __LINE__);
2375 return NULL;
2376 }
2377
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002378 for (i = 0; i < len / period_len; i++) {
2379 desc = pl330_get_desc(pch);
2380 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002381 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002382 __func__, __LINE__);
2383
2384 if (!first)
2385 return NULL;
2386
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002387 spin_lock_irqsave(&pl330->pool_lock, flags);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002388
2389 while (!list_empty(&first->node)) {
2390 desc = list_entry(first->node.next,
2391 struct dma_pl330_desc, node);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002392 list_move_tail(&desc->node, &pl330->desc_pool);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002393 }
2394
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002395 list_move_tail(&first->node, &pl330->desc_pool);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002396
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002397 spin_unlock_irqrestore(&pl330->pool_lock, flags);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002398
2399 return NULL;
2400 }
2401
2402 switch (direction) {
2403 case DMA_MEM_TO_DEV:
2404 desc->rqcfg.src_inc = 1;
2405 desc->rqcfg.dst_inc = 0;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002406 src = dma_addr;
2407 dst = pch->fifo_addr;
2408 break;
2409 case DMA_DEV_TO_MEM:
2410 desc->rqcfg.src_inc = 0;
2411 desc->rqcfg.dst_inc = 1;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002412 src = pch->fifo_addr;
2413 dst = dma_addr;
2414 break;
2415 default:
2416 break;
2417 }
2418
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02002419 desc->rqtype = direction;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002420 desc->rqcfg.brst_size = pch->burst_sz;
2421 desc->rqcfg.brst_len = 1;
2422 fill_px(&desc->px, dst, src, period_len);
2423
2424 if (!first)
2425 first = desc;
2426 else
2427 list_add_tail(&desc->node, &first->node);
2428
2429 dma_addr += period_len;
2430 }
2431
2432 if (!desc)
2433 return NULL;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002434
2435 pch->cyclic = true;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002436 desc->txd.flags = flags;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002437
2438 return &desc->txd;
2439}
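/*
 * Illustrative usage sketch (assumption): an audio-style client would
 * normally obtain a cyclic descriptor via the dmaengine wrapper, with the
 * total buffer length an exact multiple of the period length (anything
 * else is rejected above):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * period_bytes,
 *					 period_bytes, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *
 * One descriptor is built per period and chained circularly; the tasklet
 * recycles each completed descriptor back onto work_list, so the
 * completion callback fires once per period.
 */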
2440
Jassi Brarb3040e42010-05-23 20:28:19 -07002441static struct dma_async_tx_descriptor *
2442pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2443 dma_addr_t src, size_t len, unsigned long flags)
2444{
2445 struct dma_pl330_desc *desc;
2446 struct dma_pl330_chan *pch = to_pchan(chan);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002447 struct pl330_dmac *pl330 = pch->dmac;
Jassi Brarb3040e42010-05-23 20:28:19 -07002448 int burst;
2449
Rob Herring4e0e6102011-07-25 16:05:04 -05002450 if (unlikely(!pch || !len))
Jassi Brarb3040e42010-05-23 20:28:19 -07002451 return NULL;
2452
Jassi Brarb3040e42010-05-23 20:28:19 -07002453 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
2454 if (!desc)
2455 return NULL;
2456
2457 desc->rqcfg.src_inc = 1;
2458 desc->rqcfg.dst_inc = 1;
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02002459 desc->rqtype = DMA_MEM_TO_MEM;
Jassi Brarb3040e42010-05-23 20:28:19 -07002460
2461 /* Select max possible burst size */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002462 burst = pl330->pcfg.data_bus_width / 8;
Jassi Brarb3040e42010-05-23 20:28:19 -07002463
2464 while (burst > 1) {
2465 if (!(len % burst))
2466 break;
2467 burst /= 2;
2468 }
2469
2470 desc->rqcfg.brst_size = 0;
2471 while (burst != (1 << desc->rqcfg.brst_size))
2472 desc->rqcfg.brst_size++;
2473
2474 desc->rqcfg.brst_len = get_burst_len(desc, len);
2475
2476 desc->txd.flags = flags;
2477
2478 return &desc->txd;
2479}
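/*
 * Worked example of the burst selection above (illustrative): on a 32-bit
 * data bus the starting burst is 4 bytes.  A 4096-byte copy is divisible by
 * 4, so brst_size ends up as 2 (4 == 1 << 2), while a 4094-byte copy falls
 * back to a 2-byte burst (brst_size = 1).  brst_len is then maximised by
 * get_burst_len() within the 16-beat hardware limit.
 */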
2480
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002481static void __pl330_giveback_desc(struct pl330_dmac *pl330,
Chanho Park52a9d172013-08-09 20:11:33 +09002482 struct dma_pl330_desc *first)
2483{
2484 unsigned long flags;
2485 struct dma_pl330_desc *desc;
2486
2487 if (!first)
2488 return;
2489
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002490 spin_lock_irqsave(&pl330->pool_lock, flags);
Chanho Park52a9d172013-08-09 20:11:33 +09002491
2492 while (!list_empty(&first->node)) {
2493 desc = list_entry(first->node.next,
2494 struct dma_pl330_desc, node);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002495 list_move_tail(&desc->node, &pl330->desc_pool);
Chanho Park52a9d172013-08-09 20:11:33 +09002496 }
2497
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002498 list_move_tail(&first->node, &pl330->desc_pool);
Chanho Park52a9d172013-08-09 20:11:33 +09002499
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002500 spin_unlock_irqrestore(&pl330->pool_lock, flags);
Chanho Park52a9d172013-08-09 20:11:33 +09002501}
2502
Jassi Brarb3040e42010-05-23 20:28:19 -07002503static struct dma_async_tx_descriptor *
2504pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
Vinod Kouldb8196d2011-10-13 22:34:23 +05302505 unsigned int sg_len, enum dma_transfer_direction direction,
Alexandre Bounine185ecb52012-03-08 15:35:13 -05002506 unsigned long flg, void *context)
Jassi Brarb3040e42010-05-23 20:28:19 -07002507{
2508 struct dma_pl330_desc *first, *desc = NULL;
2509 struct dma_pl330_chan *pch = to_pchan(chan);
Jassi Brarb3040e42010-05-23 20:28:19 -07002510 struct scatterlist *sg;
Boojin Kim1b9bb712011-09-02 09:44:30 +09002511 int i;
Jassi Brarb3040e42010-05-23 20:28:19 -07002512 dma_addr_t addr;
2513
Thomas Abrahamcd072512011-10-24 11:43:11 +02002514 if (unlikely(!pch || !sgl || !sg_len))
Jassi Brarb3040e42010-05-23 20:28:19 -07002515 return NULL;
2516
Boojin Kim1b9bb712011-09-02 09:44:30 +09002517 addr = pch->fifo_addr;
Jassi Brarb3040e42010-05-23 20:28:19 -07002518
2519 first = NULL;
2520
2521 for_each_sg(sgl, sg, sg_len, i) {
2522
2523 desc = pl330_get_desc(pch);
2524 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002525 struct pl330_dmac *pl330 = pch->dmac;
Jassi Brarb3040e42010-05-23 20:28:19 -07002526
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002527 dev_err(pch->dmac->ddma.dev,
Jassi Brarb3040e42010-05-23 20:28:19 -07002528 "%s:%d Unable to fetch desc\n",
2529 __func__, __LINE__);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002530 __pl330_giveback_desc(pl330, first);
Jassi Brarb3040e42010-05-23 20:28:19 -07002531
2532 return NULL;
2533 }
2534
2535 if (!first)
2536 first = desc;
2537 else
2538 list_add_tail(&desc->node, &first->node);
2539
Vinod Kouldb8196d2011-10-13 22:34:23 +05302540 if (direction == DMA_MEM_TO_DEV) {
Jassi Brarb3040e42010-05-23 20:28:19 -07002541 desc->rqcfg.src_inc = 1;
2542 desc->rqcfg.dst_inc = 0;
2543 fill_px(&desc->px,
2544 addr, sg_dma_address(sg), sg_dma_len(sg));
2545 } else {
2546 desc->rqcfg.src_inc = 0;
2547 desc->rqcfg.dst_inc = 1;
2548 fill_px(&desc->px,
2549 sg_dma_address(sg), addr, sg_dma_len(sg));
2550 }
2551
Boojin Kim1b9bb712011-09-02 09:44:30 +09002552 desc->rqcfg.brst_size = pch->burst_sz;
Jassi Brarb3040e42010-05-23 20:28:19 -07002553 desc->rqcfg.brst_len = 1;
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02002554 desc->rqtype = direction;
Jassi Brarb3040e42010-05-23 20:28:19 -07002555 }
2556
2557 /* Return the last desc in the chain */
2558 desc->txd.flags = flg;
2559 return &desc->txd;
2560}
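/*
 * Illustrative usage sketch (assumption): once dmaengine_slave_config() has
 * set the FIFO address and burst size, a client maps its buffer and asks
 * for a slave transfer in the usual dmaengine way:
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Each scatterlist entry becomes one dma_pl330_desc above, chained together
 * and submitted as a unit through pl330_tx_submit().
 */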
2561
2562static irqreturn_t pl330_irq_handler(int irq, void *data)
2563{
2564 if (pl330_update(data))
2565 return IRQ_HANDLED;
2566 else
2567 return IRQ_NONE;
2568}
2569
Lars-Peter Clausenca38ff12013-07-15 17:53:08 +02002570#define PL330_DMA_BUSWIDTHS \
2571 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
2572 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
2573 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
2574 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
2575 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
2576
2577static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
2578 struct dma_slave_caps *caps)
2579{
2580 caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
2581 caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
2582 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2583 caps->cmd_pause = false;
2584 caps->cmd_terminate = true;
Lars-Peter Clausenbfb9bb42014-01-11 14:02:17 +01002585 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
Lars-Peter Clausenca38ff12013-07-15 17:53:08 +02002586
Lars-Peter Clausenca38ff12013-07-15 17:53:08 +02002587 return 0;
2588}
2589
Bill Pemberton463a1f82012-11-19 13:22:55 -05002590static int
Russell Kingaa25afa2011-02-19 15:55:00 +00002591pl330_probe(struct amba_device *adev, const struct amba_id *id)
Jassi Brarb3040e42010-05-23 20:28:19 -07002592{
2593 struct dma_pl330_platdata *pdat;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002594 struct pl330_config *pcfg;
2595 struct pl330_dmac *pl330;
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302596 struct dma_pl330_chan *pch, *_p;
Jassi Brarb3040e42010-05-23 20:28:19 -07002597 struct dma_device *pd;
2598 struct resource *res;
2599 int i, ret, irq;
Rob Herring4e0e6102011-07-25 16:05:04 -05002600 int num_chan;
Jassi Brarb3040e42010-05-23 20:28:19 -07002601
Jingoo Hand4adcc02013-07-30 17:09:11 +09002602 pdat = dev_get_platdata(&adev->dev);
Jassi Brarb3040e42010-05-23 20:28:19 -07002603
Russell King64113012013-06-27 10:29:32 +01002604 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
2605 if (ret)
2606 return ret;
2607
Jassi Brarb3040e42010-05-23 20:28:19 -07002608 /* Allocate a new DMAC and its Channels */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002609 pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
2610 if (!pl330) {
Jassi Brarb3040e42010-05-23 20:28:19 -07002611 dev_err(&adev->dev, "unable to allocate mem\n");
2612 return -ENOMEM;
2613 }
2614
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002615 pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
Jassi Brarb3040e42010-05-23 20:28:19 -07002616
2617 res = &adev->res;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002618 pl330->base = devm_ioremap_resource(&adev->dev, res);
2619 if (IS_ERR(pl330->base))
2620 return PTR_ERR(pl330->base);
Jassi Brarb3040e42010-05-23 20:28:19 -07002621
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002622 amba_set_drvdata(adev, pl330);
Boojin Kima2f52032011-09-02 09:44:29 +09002623
Dan Carpenter02808b42013-11-08 12:50:24 +03002624 for (i = 0; i < AMBA_NR_IRQS; i++) {
Michal Simeke98b3ca2013-09-30 08:50:48 +02002625 irq = adev->irq[i];
2626 if (irq) {
2627 ret = devm_request_irq(&adev->dev, irq,
2628 pl330_irq_handler, 0,
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002629 dev_name(&adev->dev), pl330);
Michal Simeke98b3ca2013-09-30 08:50:48 +02002630 if (ret)
2631 return ret;
2632 } else {
2633 break;
2634 }
2635 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002636
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002637 pcfg = &pl330->pcfg;
2638
2639 pcfg->periph_id = adev->periphid;
2640 ret = pl330_add(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07002641 if (ret)
Michal Simek173e8382013-09-04 16:40:17 +02002642 return ret;
Jassi Brarb3040e42010-05-23 20:28:19 -07002643
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002644 INIT_LIST_HEAD(&pl330->desc_pool);
2645 spin_lock_init(&pl330->pool_lock);
Jassi Brarb3040e42010-05-23 20:28:19 -07002646
2647 /* Create a descriptor pool of default size */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002648 if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
Jassi Brarb3040e42010-05-23 20:28:19 -07002649 dev_warn(&adev->dev, "unable to allocate desc\n");
2650
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002651 pd = &pl330->ddma;
Jassi Brarb3040e42010-05-23 20:28:19 -07002652 INIT_LIST_HEAD(&pd->channels);
2653
2654 /* Initialize channel parameters */
Olof Johanssonc8473822012-04-08 16:26:19 -07002655 if (pdat)
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002656 num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
Olof Johanssonc8473822012-04-08 16:26:19 -07002657 else
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002658 num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
Olof Johanssonc8473822012-04-08 16:26:19 -07002659
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002660 pl330->num_peripherals = num_chan;
Lars-Peter Clausen70cbb162014-01-11 20:08:39 +01002661
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002662 pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
2663 if (!pl330->peripherals) {
Sachin Kamat61c6e752012-09-17 15:20:23 +05302664 ret = -ENOMEM;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002665 dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
Sachin Kamate4d43c12012-11-15 06:27:50 +00002666 goto probe_err2;
Sachin Kamat61c6e752012-09-17 15:20:23 +05302667 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002668
Rob Herring4e0e6102011-07-25 16:05:04 -05002669 for (i = 0; i < num_chan; i++) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002670 pch = &pl330->peripherals[i];
Thomas Abraham93ed5542011-10-24 11:43:31 +02002671 if (!adev->dev.of_node)
2672 pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
2673 else
2674 pch->chan.private = adev->dev.of_node;
Jassi Brarb3040e42010-05-23 20:28:19 -07002675
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002676 INIT_LIST_HEAD(&pch->submitted_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002677 INIT_LIST_HEAD(&pch->work_list);
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002678 INIT_LIST_HEAD(&pch->completed_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002679 spin_lock_init(&pch->lock);
Lars-Peter Clausen65ad6062014-07-06 20:32:26 +02002680 pch->thread = NULL;
Jassi Brarb3040e42010-05-23 20:28:19 -07002681 pch->chan.device = pd;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002682 pch->dmac = pl330;
Jassi Brarb3040e42010-05-23 20:28:19 -07002683
2684 /* Add the channel to the DMAC list */
Jassi Brarb3040e42010-05-23 20:28:19 -07002685 list_add_tail(&pch->chan.device_node, &pd->channels);
2686 }
2687
2688 pd->dev = &adev->dev;
Thomas Abraham93ed5542011-10-24 11:43:31 +02002689 if (pdat) {
Thomas Abrahamcd072512011-10-24 11:43:11 +02002690 pd->cap_mask = pdat->cap_mask;
Thomas Abraham93ed5542011-10-24 11:43:31 +02002691 } else {
Thomas Abrahamcd072512011-10-24 11:43:11 +02002692 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002693 if (pcfg->num_peri) {
Thomas Abraham93ed5542011-10-24 11:43:31 +02002694 dma_cap_set(DMA_SLAVE, pd->cap_mask);
2695 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
Tushar Behera5557a412012-08-29 10:16:25 +05302696 dma_cap_set(DMA_PRIVATE, pd->cap_mask);
Thomas Abraham93ed5542011-10-24 11:43:31 +02002697 }
2698 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002699
2700 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
2701 pd->device_free_chan_resources = pl330_free_chan_resources;
2702 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002703 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
Jassi Brarb3040e42010-05-23 20:28:19 -07002704 pd->device_tx_status = pl330_tx_status;
2705 pd->device_prep_slave_sg = pl330_prep_slave_sg;
2706 pd->device_control = pl330_control;
2707 pd->device_issue_pending = pl330_issue_pending;
Lars-Peter Clausenca38ff12013-07-15 17:53:08 +02002708 pd->device_slave_caps = pl330_dma_device_slave_caps;
Jassi Brarb3040e42010-05-23 20:28:19 -07002709
2710 ret = dma_async_device_register(pd);
2711 if (ret) {
2712 dev_err(&adev->dev, "unable to register DMAC\n");
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302713 goto probe_err3;
2714 }
2715
2716 if (adev->dev.of_node) {
2717 ret = of_dma_controller_register(adev->dev.of_node,
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002718 of_dma_pl330_xlate, pl330);
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302719 if (ret) {
2720 dev_err(&adev->dev,
2721 "unable to register DMA to the generic DT DMA helpers\n");
2722 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002723 }
Lars-Peter Clausenb714b842013-11-25 16:07:46 +01002724
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002725 adev->dev.dma_parms = &pl330->dma_parms;
Lars-Peter Clausenb714b842013-11-25 16:07:46 +01002726
Vinod Kouldbaf6d82013-09-02 21:54:48 +05302727 /*
2728 	 * This is the limit for transfers with a buswidth of 1; larger
2729 * buswidths will have larger limits.
2730 */
2731 ret = dma_set_max_seg_size(&adev->dev, 1900800);
2732 if (ret)
2733 dev_err(&adev->dev, "unable to set the seg size\n");
2734
Jassi Brarb3040e42010-05-23 20:28:19 -07002735
Jassi Brarb3040e42010-05-23 20:28:19 -07002736 dev_info(&adev->dev,
2737 "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
2738 dev_info(&adev->dev,
2739 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002740 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
2741 pcfg->num_peri, pcfg->num_events);
Jassi Brarb3040e42010-05-23 20:28:19 -07002742
2743 return 0;
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302744probe_err3:
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302745 /* Idle the DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002746 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302747 chan.device_node) {
2748
2749 /* Remove the channel */
2750 list_del(&pch->chan.device_node);
2751
2752 /* Flush the channel */
2753 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
2754 pl330_free_chan_resources(&pch->chan);
2755 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002756probe_err2:
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002757 pl330_del(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07002758
2759 return ret;
2760}
2761
Greg Kroah-Hartman4bf27b82012-12-21 15:09:59 -08002762static int pl330_remove(struct amba_device *adev)
Jassi Brarb3040e42010-05-23 20:28:19 -07002763{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002764 struct pl330_dmac *pl330 = amba_get_drvdata(adev);
Jassi Brarb3040e42010-05-23 20:28:19 -07002765 struct dma_pl330_chan *pch, *_p;
Jassi Brarb3040e42010-05-23 20:28:19 -07002766
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302767 if (adev->dev.of_node)
2768 of_dma_controller_free(adev->dev.of_node);
Padmavathi Venna421da892013-02-14 09:10:07 +05302769
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002770 dma_async_device_unregister(&pl330->ddma);
Jassi Brarb3040e42010-05-23 20:28:19 -07002771
2772 /* Idle the DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002773 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
Jassi Brarb3040e42010-05-23 20:28:19 -07002774 chan.device_node) {
2775
2776 /* Remove the channel */
2777 list_del(&pch->chan.device_node);
2778
2779 /* Flush the channel */
2780 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
2781 pl330_free_chan_resources(&pch->chan);
2782 }
2783
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002784 pl330_del(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07002785
Jassi Brarb3040e42010-05-23 20:28:19 -07002786 return 0;
2787}
2788
2789static struct amba_id pl330_ids[] = {
2790 {
2791 .id = 0x00041330,
2792 .mask = 0x000fffff,
2793 },
2794 { 0, 0 },
2795};
2796
Dave Martine8fa5162011-10-05 15:15:20 +01002797MODULE_DEVICE_TABLE(amba, pl330_ids);
2798
Jassi Brarb3040e42010-05-23 20:28:19 -07002799static struct amba_driver pl330_driver = {
2800 .drv = {
2801 .owner = THIS_MODULE,
2802 .name = "dma-pl330",
Jassi Brarb3040e42010-05-23 20:28:19 -07002803 },
2804 .id_table = pl330_ids,
2805 .probe = pl330_probe,
2806 .remove = pl330_remove,
2807};
2808
viresh kumar9e5ed092012-03-15 10:40:38 +01002809module_amba_driver(pl330_driver);
Jassi Brarb3040e42010-05-23 20:28:19 -07002810
2811MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
2812MODULE_DESCRIPTION("API Driver for PL330 DMAC");
2813MODULE_LICENSE("GPL");