/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
#define PL330_MAX_BURST		16

#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};
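
/*
 * Editor's note (illustrative, not from the original source): these
 * cache-control values end up in the 3-bit src/dst cache-control fields
 * of the channel control register, i.e. they are the values shifted in by
 * CC_SRCCCTRL_SHFT/CC_DSTCCTRL_SHFT in _prepare_ccr() further below.
 */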

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
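
/*
 * Illustrative example (editor's addition, not in the original driver):
 * for a CCR programmed with brst_size = 2 (1 << 2 = 4-byte beats) and
 * brst_len = 8, one burst moves 4 * 8 = 32 bytes, so
 * BYTE_TO_BURST(4096, ccr) == 128 and BURST_TO_BYTE(128, ccr) == 4096.
 */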

/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
		printk("%x:", cmd_line); \
		printk(x); \
		cmd_line += off; \
	} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */

#define NR_DEFAULT_DESC	16

/* Delay for runtime PM autosuspend, ms */
#define PL330_AUTOSUSPEND_DELAY 20

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:11;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/**
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request completed successfully. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct dma_pl330_desc;

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct dma_pl330_desc *desc;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;

	/* for runtime pm tracking */
	bool active;
};

struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
	int quirks;
};

static struct pl330_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk = "arm,pl330-broken-no-flushp",
		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
	}
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	int bytes_requested;
	bool last;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};

static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((__le16 *)&buf[1]) = cpu_to_le16(val);

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((__le32 *)&buf[2]) = cpu_to_le32(val);

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	*((__le32 *)&buf[2]) = cpu_to_le32(addr);

	return SZ_DMAGO;
}

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
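
/*
 * Editor's note (assumption, not from the original source): loops_per_jiffy
 * busy-loop iterations run per jiffy and there are HZ jiffies per second,
 * so loops_per_jiffy / 1000 * HZ * t approximates the number of cpu_relax()
 * spins that cover t milliseconds in _until_dmac_idle() below.
 */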
914
915/* Returns Time-Out */
916static bool _until_dmac_idle(struct pl330_thread *thrd)
917{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +0200918 void __iomem *regs = thrd->dmac->base;
Boojin Kimb7d861d2011-12-26 18:49:52 +0900919 unsigned long loops = msecs_to_loops(5);
920
921 do {
922 /* Until Manager is Idle */
923 if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
924 break;
925
926 cpu_relax();
927 } while (--loops);
928
929 if (!loops)
930 return true;
931
932 return false;
933}
934
935static inline void _execute_DBGINSN(struct pl330_thread *thrd,
936 u8 insn[], bool as_manager)
937{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +0200938 void __iomem *regs = thrd->dmac->base;
Boojin Kimb7d861d2011-12-26 18:49:52 +0900939 u32 val;
940
941 val = (insn[0] << 16) | (insn[1] << 24);
942 if (!as_manager) {
943 val |= (1 << 0);
944 val |= (thrd->id << 8); /* Channel Number */
945 }
946 writel(val, regs + DBGINST0);
947
Ben Dooks3a2307f2015-03-16 11:52:43 +0000948 val = le32_to_cpu(*((__le32 *)&insn[2]));
Boojin Kimb7d861d2011-12-26 18:49:52 +0900949 writel(val, regs + DBGINST1);
950
951 /* If timed out due to halted state-machine */
952 if (_until_dmac_idle(thrd)) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +0200953 dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
Boojin Kimb7d861d2011-12-26 18:49:52 +0900954 return;
955 }
956
957 /* Get going */
958 writel(0, regs + DBGCMD);
959}
960
Boojin Kimb7d861d2011-12-26 18:49:52 +0900961static inline u32 _state(struct pl330_thread *thrd)
962{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +0200963 void __iomem *regs = thrd->dmac->base;
Boojin Kimb7d861d2011-12-26 18:49:52 +0900964 u32 val;
965
966 if (is_manager(thrd))
967 val = readl(regs + DS) & 0xf;
968 else
969 val = readl(regs + CS(thrd->id)) & 0xf;
970
971 switch (val) {
972 case DS_ST_STOP:
973 return PL330_STATE_STOPPED;
974 case DS_ST_EXEC:
975 return PL330_STATE_EXECUTING;
976 case DS_ST_CMISS:
977 return PL330_STATE_CACHEMISS;
978 case DS_ST_UPDTPC:
979 return PL330_STATE_UPDTPC;
980 case DS_ST_WFE:
981 return PL330_STATE_WFE;
982 case DS_ST_FAULT:
983 return PL330_STATE_FAULTING;
984 case DS_ST_ATBRR:
985 if (is_manager(thrd))
986 return PL330_STATE_INVALID;
987 else
988 return PL330_STATE_ATBARRIER;
989 case DS_ST_QBUSY:
990 if (is_manager(thrd))
991 return PL330_STATE_INVALID;
992 else
993 return PL330_STATE_QUEUEBUSY;
994 case DS_ST_WFP:
995 if (is_manager(thrd))
996 return PL330_STATE_INVALID;
997 else
998 return PL330_STATE_WFP;
999 case DS_ST_KILL:
1000 if (is_manager(thrd))
1001 return PL330_STATE_INVALID;
1002 else
1003 return PL330_STATE_KILLING;
1004 case DS_ST_CMPLT:
1005 if (is_manager(thrd))
1006 return PL330_STATE_INVALID;
1007 else
1008 return PL330_STATE_COMPLETING;
1009 case DS_ST_FLTCMP:
1010 if (is_manager(thrd))
1011 return PL330_STATE_INVALID;
1012 else
1013 return PL330_STATE_FAULT_COMPLETING;
1014 default:
1015 return PL330_STATE_INVALID;
1016 }
1017}
1018
1019static void _stop(struct pl330_thread *thrd)
1020{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001021 void __iomem *regs = thrd->dmac->base;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001022 u8 insn[6] = {0, 0, 0, 0, 0, 0};
1023
1024 if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
1025 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
1026
1027 /* Return if nothing needs to be done */
1028 if (_state(thrd) == PL330_STATE_COMPLETING
1029 || _state(thrd) == PL330_STATE_KILLING
1030 || _state(thrd) == PL330_STATE_STOPPED)
1031 return;
1032
1033 _emit_KILL(0, insn);
1034
1035 /* Stop generating interrupts for SEV */
1036 writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
1037
1038 _execute_DBGINSN(thrd, insn, is_manager(thrd));
1039}
1040
1041/* Start doing req 'idx' of thread 'thrd' */
1042static bool _trigger(struct pl330_thread *thrd)
1043{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001044 void __iomem *regs = thrd->dmac->base;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001045 struct _pl330_req *req;
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02001046 struct dma_pl330_desc *desc;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001047 struct _arg_GO go;
1048 unsigned ns;
1049 u8 insn[6] = {0, 0, 0, 0, 0, 0};
1050 int idx;
1051
1052 /* Return if already ACTIVE */
1053 if (_state(thrd) != PL330_STATE_STOPPED)
1054 return true;
1055
1056 idx = 1 - thrd->lstenq;
Lars-Peter Clausen8ed30a12014-07-06 20:32:31 +02001057 if (thrd->req[idx].desc != NULL) {
Boojin Kimb7d861d2011-12-26 18:49:52 +09001058 req = &thrd->req[idx];
Lars-Peter Clausen8ed30a12014-07-06 20:32:31 +02001059 } else {
Boojin Kimb7d861d2011-12-26 18:49:52 +09001060 idx = thrd->lstenq;
Lars-Peter Clausen8ed30a12014-07-06 20:32:31 +02001061 if (thrd->req[idx].desc != NULL)
Boojin Kimb7d861d2011-12-26 18:49:52 +09001062 req = &thrd->req[idx];
1063 else
1064 req = NULL;
1065 }
1066
1067 /* Return if no request */
Lars-Peter Clausen8ed30a12014-07-06 20:32:31 +02001068 if (!req)
Boojin Kimb7d861d2011-12-26 18:49:52 +09001069 return true;
1070
Addy Ke0091b9d2014-12-08 19:28:20 +08001071 /* Return if req is running */
1072 if (idx == thrd->req_running)
1073 return true;
1074
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02001075 desc = req->desc;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001076
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02001077 ns = desc->rqcfg.nonsecure ? 1 : 0;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001078
1079 /* See 'Abort Sources' point-4 at Page 2-25 */
1080 if (_manager_ns(thrd) && !ns)
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001081 dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
Boojin Kimb7d861d2011-12-26 18:49:52 +09001082 __func__, __LINE__);
1083
1084 go.chan = thrd->id;
1085 go.addr = req->mc_bus;
1086 go.ns = ns;
1087 _emit_GO(0, insn, &go);
1088
1089 /* Set to generate interrupts for SEV */
1090 writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
1091
1092 /* Only manager can execute GO */
1093 _execute_DBGINSN(thrd, insn, true);
1094
1095 thrd->req_running = idx;
1096
1097 return true;
1098}
1099
1100static bool _start(struct pl330_thread *thrd)
1101{
1102 switch (_state(thrd)) {
1103 case PL330_STATE_FAULT_COMPLETING:
1104 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
1105
1106 if (_state(thrd) == PL330_STATE_KILLING)
1107 UNTIL(thrd, PL330_STATE_STOPPED)
1108
1109 case PL330_STATE_FAULTING:
1110 _stop(thrd);
1111
1112 case PL330_STATE_KILLING:
1113 case PL330_STATE_COMPLETING:
1114 UNTIL(thrd, PL330_STATE_STOPPED)
1115
1116 case PL330_STATE_STOPPED:
1117 return _trigger(thrd);
1118
1119 case PL330_STATE_WFP:
1120 case PL330_STATE_QUEUEBUSY:
1121 case PL330_STATE_ATBARRIER:
1122 case PL330_STATE_UPDTPC:
1123 case PL330_STATE_CACHEMISS:
1124 case PL330_STATE_EXECUTING:
1125 return true;
1126
1127 case PL330_STATE_WFE: /* For RESUME, nothing yet */
1128 default:
1129 return false;
1130 }
1131}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
				 u8 buf[], const struct _xfer_spec *pxs,
				 int cyc)
{
	int off = 0;
	enum pl330_cond cond;

	if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
		cond = BURST;
	else
		cond = SINGLE;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);

		if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
			off += _emit_FLUSHP(dry_run, &buf[off],
					    pxs->desc->peri);
	}

	return off;
}

static inline int _ldst_memtodev(struct pl330_dmac *pl330,
				 unsigned dry_run, u8 buf[],
				 const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	enum pl330_cond cond;

	if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
		cond = BURST;
	else
		cond = SINGLE;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);

		if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
			off += _emit_FLUSHP(dry_run, &buf[off],
					    pxs->desc->peri);
	}

	return off;
}

static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
		off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_DEV_TO_MEM:
		off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	if (*bursts == 1)
		return _bursts(pl330, dry_run, buf, pxs, 1);

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}
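
	/*
	 * Illustrative trace (editor's addition, not in the original driver):
	 * for *bursts == 70000 the first branch picks lcnt1 = lcnt0 = 256 and
	 * cyc = 70000/256/256 = 1, so this call covers 256 * 256 = 65536
	 * bursts; the remaining 4464 are handled by the next iteration of the
	 * caller, _setup_loops().
	 */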

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(pl330, 1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;
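
	/*
	 * Worked example (editor's addition, assuming a lock-up free DMAC
	 * doing MEM_TO_MEM): szbrst = SZ_DMALD + SZ_DMAST = 2 and, with both
	 * loops in use, szlp = szlpend = 4, so cycmax = (255 - 8) / 2 = 123
	 * unrolled load/store pairs per DMALPEND backward jump.
	 */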

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}

static inline int _setup_loops(struct pl330_dmac *pl330,
			       unsigned dry_run, u8 buf[],
			       const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}

static inline int _setup_xfer(struct pl330_dmac *pl330,
			      unsigned dry_run, u8 buf[],
			      const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(pl330, dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
		      struct pl330_thread *thrd, unsigned index,
		      struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = &pxs->desc->px;
	/* Error if xfer length is not aligned at burst size */
	if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
		return -EINVAL;

	off += _setup_xfer(pl330, dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
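
/*
 * Illustrative example (editor's addition): a mem-to-mem request with
 * src_inc = dst_inc = 1, brst_size = 2 (4-byte beats) and brst_len = 16
 * yields a CCR with CC_SRCINC | CC_DSTINC set, 0xf in both burst-length
 * fields and 0x2 in both burst-size fields; everything else stays 0.
 */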

/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
			    struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
			"%s:%d Invalid peripheral(%u)!\n",
			__func__, __LINE__, desc->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	xs.ccr = ccr;
	xs.desc = desc;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(pl330, 1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
			__func__, __LINE__, ret, pl330->mcbufsz / 2);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].desc = desc;
	_setup_req(pl330, 0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (!desc)
		return;

	pch = desc->pchan;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself has gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone, *tmp;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e. which thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			thrd->req_running = -1;

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	unsigned long flags;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
}

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
1835 return -ENOMEM;
1836
1837 /* Init Channel threads */
1838 for (i = 0; i < chans; i++) {
1839 thrd = &pl330->channels[i];
1840 thrd->id = i;
1841 thrd->dmac = pl330;
1842 _reset_thread(thrd);
1843 thrd->free = true;
1844 }
1845
1846 /* MANAGER is indexed at the end */
1847 thrd = &pl330->channels[chans];
1848 thrd->id = chans;
1849 thrd->dmac = pl330;
1850 thrd->free = false;
1851 pl330->manager = thrd;
1852
1853 return 0;
1854}
1855
1856static int dmac_alloc_resources(struct pl330_dmac *pl330)
1857{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001858 int chans = pl330->pcfg.num_chan;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001859 int ret;
1860
1861 /*
1862 * Alloc MicroCode buffer for 'chans' Channel threads.
1863 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
1864 */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001865 pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
1866 chans * pl330->mcbufsz,
Boojin Kimb7d861d2011-12-26 18:49:52 +09001867 &pl330->mcode_bus, GFP_KERNEL);
1868 if (!pl330->mcode_cpu) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001869 dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
Boojin Kimb7d861d2011-12-26 18:49:52 +09001870 __func__, __LINE__);
1871 return -ENOMEM;
1872 }
1873
1874 ret = dmac_alloc_threads(pl330);
1875 if (ret) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001876 dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
Boojin Kimb7d861d2011-12-26 18:49:52 +09001877 __func__, __LINE__);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001878 dma_free_coherent(pl330->ddma.dev,
1879 chans * pl330->mcbufsz,
Boojin Kimb7d861d2011-12-26 18:49:52 +09001880 pl330->mcode_cpu, pl330->mcode_bus);
1881 return ret;
1882 }
1883
1884 return 0;
1885}
1886
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001887static int pl330_add(struct pl330_dmac *pl330)
Boojin Kimb7d861d2011-12-26 18:49:52 +09001888{
Boojin Kimb7d861d2011-12-26 18:49:52 +09001889 void __iomem *regs;
1890 int i, ret;
1891
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001892 regs = pl330->base;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001893
1894 /* Check if we can handle this DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001895 if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
1896 dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
1897 pl330->pcfg.periph_id);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001898 return -EINVAL;
1899 }
1900
1901 /* Read the configuration of the DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001902 read_dmac_config(pl330);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001903
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001904 if (pl330->pcfg.num_events == 0) {
1905 dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
Boojin Kimb7d861d2011-12-26 18:49:52 +09001906 __func__, __LINE__);
1907 return -EINVAL;
1908 }
1909
Boojin Kimb7d861d2011-12-26 18:49:52 +09001910 spin_lock_init(&pl330->lock);
1911
1912 INIT_LIST_HEAD(&pl330->req_done);
1913
1914 /* Use default MC buffer size if not provided */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001915 if (!pl330->mcbufsz)
1916 pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;
Boojin Kimb7d861d2011-12-26 18:49:52 +09001917
1918 /* Mark all events as free */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001919 for (i = 0; i < pl330->pcfg.num_events; i++)
Boojin Kimb7d861d2011-12-26 18:49:52 +09001920 pl330->events[i] = -1;
1921
1922 /* Allocate resources needed by the DMAC */
1923 ret = dmac_alloc_resources(pl330);
1924 if (ret) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001925 dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
Boojin Kimb7d861d2011-12-26 18:49:52 +09001926 return ret;
1927 }
1928
1929 tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
1930
1931 pl330->state = INIT;
1932
1933 return 0;
1934}
1935
1936static int dmac_free_threads(struct pl330_dmac *pl330)
1937{
Boojin Kimb7d861d2011-12-26 18:49:52 +09001938 struct pl330_thread *thrd;
1939 int i;
1940
1941 /* Release Channel threads */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001942 for (i = 0; i < pl330->pcfg.num_chan; i++) {
Boojin Kimb7d861d2011-12-26 18:49:52 +09001943 thrd = &pl330->channels[i];
Lars-Peter Clausen65ad6062014-07-06 20:32:26 +02001944 pl330_release_channel(thrd);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001945 }
1946
1947 /* Free memory */
1948 kfree(pl330->channels);
1949
1950 return 0;
1951}
1952
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001953static void pl330_del(struct pl330_dmac *pl330)
Boojin Kimb7d861d2011-12-26 18:49:52 +09001954{
Boojin Kimb7d861d2011-12-26 18:49:52 +09001955 pl330->state = UNINIT;
1956
1957 tasklet_kill(&pl330->tasks);
1958
1959 /* Free DMAC resources */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001960 dmac_free_threads(pl330);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001961
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02001962 dma_free_coherent(pl330->ddma.dev,
1963 pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
1964 pl330->mcode_bus);
Boojin Kimb7d861d2011-12-26 18:49:52 +09001965}
1966
Thomas Abraham3e2ec132011-10-24 11:43:02 +02001967/* forward declaration */
1968static struct amba_driver pl330_driver;
1969
Jassi Brarb3040e42010-05-23 20:28:19 -07001970static inline struct dma_pl330_chan *
1971to_pchan(struct dma_chan *ch)
1972{
1973 if (!ch)
1974 return NULL;
1975
1976 return container_of(ch, struct dma_pl330_chan, chan);
1977}
1978
1979static inline struct dma_pl330_desc *
1980to_desc(struct dma_async_tx_descriptor *tx)
1981{
1982 return container_of(tx, struct dma_pl330_desc, txd);
1983}
1984
Jassi Brarb3040e42010-05-23 20:28:19 -07001985static inline void fill_queue(struct dma_pl330_chan *pch)
1986{
1987 struct dma_pl330_desc *desc;
1988 int ret;
1989
1990 list_for_each_entry(desc, &pch->work_list, node) {
1991
1992 /* If already submitted */
1993 if (desc->status == BUSY)
Jassi Brar30fb9802013-02-13 16:13:14 +05301994 continue;
Jassi Brarb3040e42010-05-23 20:28:19 -07001995
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02001996 ret = pl330_submit_req(pch->thread, desc);
Jassi Brarb3040e42010-05-23 20:28:19 -07001997 if (!ret) {
1998 desc->status = BUSY;
Jassi Brarb3040e42010-05-23 20:28:19 -07001999 } else if (ret == -EAGAIN) {
2000 /* QFull or DMAC Dying */
2001 break;
2002 } else {
2003 /* Unacceptable request */
2004 desc->status = DONE;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002005 dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
Jassi Brarb3040e42010-05-23 20:28:19 -07002006 __func__, __LINE__, desc->txd.cookie);
2007 tasklet_schedule(&pch->task);
2008 }
2009 }
2010}
2011
2012static void pl330_tasklet(unsigned long data)
2013{
2014 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
2015 struct dma_pl330_desc *desc, *_dt;
2016 unsigned long flags;
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002017 bool power_down = false;
Jassi Brarb3040e42010-05-23 20:28:19 -07002018
2019 spin_lock_irqsave(&pch->lock, flags);
2020
2021 /* Pick up ripe tomatoes (i.e. descriptors that have completed) */
2022 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
2023 if (desc->status == DONE) {
Tushar Behera30c1dc02012-05-23 16:47:31 +05302024 if (!pch->cyclic)
Vinod Kouleab21582012-05-11 11:24:41 +05302025 dma_cookie_complete(&desc->txd);
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002026 list_move_tail(&desc->node, &pch->completed_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002027 }
2028
2029 /* Try to submit a req immediately next to the last completed cookie */
2030 fill_queue(pch);
2031
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002032 if (list_empty(&pch->work_list)) {
2033 spin_lock(&pch->thread->dmac->lock);
2034 _stop(pch->thread);
2035 spin_unlock(&pch->thread->dmac->lock);
2036 power_down = true;
Marek Szyprowskid21814a2016-12-16 11:39:11 +01002037 pch->active = false;
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002038 } else {
2039 /* Make sure the PL330 Channel thread is active */
2040 spin_lock(&pch->thread->dmac->lock);
2041 _start(pch->thread);
2042 spin_unlock(&pch->thread->dmac->lock);
2043 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002044
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002045 while (!list_empty(&pch->completed_list)) {
Dave Jiangf08462c2016-07-20 13:12:35 -07002046 struct dmaengine_desc_callback cb;
Jassi Brarb3040e42010-05-23 20:28:19 -07002047
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002048 desc = list_first_entry(&pch->completed_list,
2049 struct dma_pl330_desc, node);
2050
Dave Jiangf08462c2016-07-20 13:12:35 -07002051 dmaengine_desc_get_callback(&desc->txd, &cb);
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002052
2053 if (pch->cyclic) {
2054 desc->status = PREP;
2055 list_move_tail(&desc->node, &pch->work_list);
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002056 if (power_down) {
Marek Szyprowskid21814a2016-12-16 11:39:11 +01002057 pch->active = true;
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002058 spin_lock(&pch->thread->dmac->lock);
2059 _start(pch->thread);
2060 spin_unlock(&pch->thread->dmac->lock);
2061 power_down = false;
2062 }
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002063 } else {
2064 desc->status = FREE;
2065 list_move_tail(&desc->node, &pch->dmac->desc_pool);
2066 }
2067
Dan Williamsd38a8c62013-10-18 19:35:23 +02002068 dma_descriptor_unmap(&desc->txd);
2069
Dave Jiangf08462c2016-07-20 13:12:35 -07002070 if (dmaengine_desc_callback_valid(&cb)) {
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002071 spin_unlock_irqrestore(&pch->lock, flags);
Dave Jiangf08462c2016-07-20 13:12:35 -07002072 dmaengine_desc_callback_invoke(&cb, NULL);
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002073 spin_lock_irqsave(&pch->lock, flags);
2074 }
2075 }
2076 spin_unlock_irqrestore(&pch->lock, flags);
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002077
2078 /* If work list empty, power down */
2079 if (power_down) {
2080 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2081 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2082 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002083}
2084
Thomas Abraham3e2ec132011-10-24 11:43:02 +02002085bool pl330_filter(struct dma_chan *chan, void *param)
2086{
Thomas Abrahamcd072512011-10-24 11:43:11 +02002087 u8 *peri_id;
Thomas Abraham3e2ec132011-10-24 11:43:02 +02002088
2089 if (chan->device->dev->driver != &pl330_driver.drv)
2090 return false;
2091
Thomas Abrahamcd072512011-10-24 11:43:11 +02002092 peri_id = chan->private;
Dan Carpenter2f986ec2013-11-08 12:51:16 +03002093 return *peri_id == (unsigned long)param;
Thomas Abraham3e2ec132011-10-24 11:43:02 +02002094}
2095EXPORT_SYMBOL(pl330_filter);
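
/*
 * Illustrative usage only, not part of this driver: a legacy (non-DT)
 * client would typically pair this filter with the generic dmaengine
 * request API roughly as below; the peripheral request number 12 is a
 * made-up placeholder.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)12);
 *	if (!chan)
 *		return -EBUSY;
 */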
2096
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302097static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
2098 struct of_dma *ofdma)
2099{
2100 int count = dma_spec->args_count;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002101 struct pl330_dmac *pl330 = ofdma->of_dma_data;
Lars-Peter Clausen70cbb162014-01-11 20:08:39 +01002102 unsigned int chan_id;
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302103
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002104 if (!pl330)
2105 return NULL;
2106
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302107 if (count != 1)
2108 return NULL;
2109
Lars-Peter Clausen70cbb162014-01-11 20:08:39 +01002110 chan_id = dma_spec->args[0];
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002111 if (chan_id >= pl330->num_peripherals)
Lars-Peter Clausen70cbb162014-01-11 20:08:39 +01002112 return NULL;
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302113
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002114 return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
Padmavathi Vennaa80258f2013-02-14 09:10:06 +05302115}
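
/*
 * Illustrative sketch (assumed typical usage, not taken from this file):
 * the xlate above expects a single specifier cell holding the peripheral
 * request line, so a client node would look roughly like
 *
 *	serial0 {
 *		dmas = <&pdma0 1>, <&pdma0 2>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * and would then obtain the channel via dma_request_slave_channel(dev, "tx").
 */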
2116
Jassi Brarb3040e42010-05-23 20:28:19 -07002117static int pl330_alloc_chan_resources(struct dma_chan *chan)
2118{
2119 struct dma_pl330_chan *pch = to_pchan(chan);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002120 struct pl330_dmac *pl330 = pch->dmac;
Jassi Brarb3040e42010-05-23 20:28:19 -07002121 unsigned long flags;
2122
2123 spin_lock_irqsave(&pch->lock, flags);
2124
Russell King - ARM Linuxd3ee98cdc2012-03-06 22:35:47 +00002125 dma_cookie_init(chan);
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002126 pch->cyclic = false;
Jassi Brarb3040e42010-05-23 20:28:19 -07002127
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002128 pch->thread = pl330_request_channel(pl330);
Lars-Peter Clausen65ad6062014-07-06 20:32:26 +02002129 if (!pch->thread) {
Jassi Brarb3040e42010-05-23 20:28:19 -07002130 spin_unlock_irqrestore(&pch->lock, flags);
Inderpal Singh02747882012-09-17 09:57:45 +05302131 return -ENOMEM;
Jassi Brarb3040e42010-05-23 20:28:19 -07002132 }
2133
2134 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
2135
2136 spin_unlock_irqrestore(&pch->lock, flags);
2137
2138 return 1;
2139}
2140
Maxime Ripard740aa952014-11-17 14:42:29 +01002141static int pl330_config(struct dma_chan *chan,
2142 struct dma_slave_config *slave_config)
2143{
2144 struct dma_pl330_chan *pch = to_pchan(chan);
2145
2146 if (slave_config->direction == DMA_MEM_TO_DEV) {
2147 if (slave_config->dst_addr)
2148 pch->fifo_addr = slave_config->dst_addr;
2149 if (slave_config->dst_addr_width)
2150 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2151 if (slave_config->dst_maxburst)
2152 pch->burst_len = slave_config->dst_maxburst;
2153 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2154 if (slave_config->src_addr)
2155 pch->fifo_addr = slave_config->src_addr;
2156 if (slave_config->src_addr_width)
2157 pch->burst_sz = __ffs(slave_config->src_addr_width);
2158 if (slave_config->src_maxburst)
2159 pch->burst_len = slave_config->src_maxburst;
2160 }
2161
2162 return 0;
2163}
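
/*
 * Illustrative example with hypothetical values (fifo_phys_addr is a
 * placeholder for the device FIFO address): a peripheral driver normally
 * provides the settings consumed above through the standard helper:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */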
2164
2165static int pl330_terminate_all(struct dma_chan *chan)
Jassi Brarb3040e42010-05-23 20:28:19 -07002166{
2167 struct dma_pl330_chan *pch = to_pchan(chan);
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002168 struct dma_pl330_desc *desc;
Jassi Brarb3040e42010-05-23 20:28:19 -07002169 unsigned long flags;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002170 struct pl330_dmac *pl330 = pch->dmac;
Boojin Kimae43b882011-09-02 09:44:32 +09002171 LIST_HEAD(list);
Marek Szyprowskid21814a2016-12-16 11:39:11 +01002172 bool power_down = false;
Jassi Brarb3040e42010-05-23 20:28:19 -07002173
Krzysztof Kozlowski81cc6ed2015-05-21 09:34:09 +09002174 pm_runtime_get_sync(pl330->ddma.dev);
Maxime Ripard740aa952014-11-17 14:42:29 +01002175 spin_lock_irqsave(&pch->lock, flags);
2176 spin_lock(&pl330->lock);
2177 _stop(pch->thread);
2178 spin_unlock(&pl330->lock);
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002179
Maxime Ripard740aa952014-11-17 14:42:29 +01002180 pch->thread->req[0].desc = NULL;
2181 pch->thread->req[1].desc = NULL;
2182 pch->thread->req_running = -1;
Marek Szyprowskid21814a2016-12-16 11:39:11 +01002183 power_down = pch->active;
2184 pch->active = false;
Lars-Peter Clausenc26939e2014-07-06 20:32:32 +02002185
Maxime Ripard740aa952014-11-17 14:42:29 +01002186 /* Mark all desc done */
2187 list_for_each_entry(desc, &pch->submitted_list, node) {
2188 desc->status = FREE;
2189 dma_cookie_complete(&desc->txd);
Boojin Kim1d0c1d62011-09-02 09:44:31 +09002190 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002191
Maxime Ripard740aa952014-11-17 14:42:29 +01002192 list_for_each_entry(desc, &pch->work_list , node) {
2193 desc->status = FREE;
2194 dma_cookie_complete(&desc->txd);
2195 }
2196
2197 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
2198 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2199 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2200 spin_unlock_irqrestore(&pch->lock, flags);
Krzysztof Kozlowski81cc6ed2015-05-21 09:34:09 +09002201 pm_runtime_mark_last_busy(pl330->ddma.dev);
Marek Szyprowskid21814a2016-12-16 11:39:11 +01002202 if (power_down)
2203 pm_runtime_put_autosuspend(pl330->ddma.dev);
Krzysztof Kozlowski81cc6ed2015-05-21 09:34:09 +09002204 pm_runtime_put_autosuspend(pl330->ddma.dev);
Maxime Ripard740aa952014-11-17 14:42:29 +01002205
Jassi Brarb3040e42010-05-23 20:28:19 -07002206 return 0;
2207}
2208
Robert Baldyga88987d22015-02-11 13:23:18 +01002209/*
2210 * We don't support DMA_RESUME command because of hardware
2211 * limitations, so after pausing the channel we cannot restore
2212 * it to active state. We have to terminate the channel and set up
2213 * the DMA transfer again. This pause feature was implemented to
2214 * allow safely reading the residue before channel termination.
2215 */
Ben Dooks5503aed2015-03-16 11:52:44 +00002216static int pl330_pause(struct dma_chan *chan)
Robert Baldyga88987d22015-02-11 13:23:18 +01002217{
2218 struct dma_pl330_chan *pch = to_pchan(chan);
2219 struct pl330_dmac *pl330 = pch->dmac;
2220 unsigned long flags;
2221
2222 pm_runtime_get_sync(pl330->ddma.dev);
2223 spin_lock_irqsave(&pch->lock, flags);
2224
2225 spin_lock(&pl330->lock);
2226 _stop(pch->thread);
2227 spin_unlock(&pl330->lock);
2228
2229 spin_unlock_irqrestore(&pch->lock, flags);
2230 pm_runtime_mark_last_busy(pl330->ddma.dev);
2231 pm_runtime_put_autosuspend(pl330->ddma.dev);
2232
2233 return 0;
2234}
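
/*
 * Illustrative sketch of the pause-then-terminate sequence described
 * above (not part of this driver; chan and cookie are assumed client
 * variables):
 *
 *	struct dma_tx_state state;
 *
 *	dmaengine_pause(chan);
 *	dmaengine_tx_status(chan, cookie, &state);
 *
 * state.residue then holds the bytes still outstanding; since resume is
 * not supported, the client calls dmaengine_terminate_all(chan) and
 * prepares a fresh transfer to continue.
 */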
2235
Jassi Brarb3040e42010-05-23 20:28:19 -07002236static void pl330_free_chan_resources(struct dma_chan *chan)
2237{
2238 struct dma_pl330_chan *pch = to_pchan(chan);
2239 unsigned long flags;
2240
Jassi Brarb3040e42010-05-23 20:28:19 -07002241 tasklet_kill(&pch->task);
2242
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002243 pm_runtime_get_sync(pch->dmac->ddma.dev);
Bartlomiej Zolnierkiewiczda331ba2013-07-03 15:00:43 -07002244 spin_lock_irqsave(&pch->lock, flags);
2245
Lars-Peter Clausen65ad6062014-07-06 20:32:26 +02002246 pl330_release_channel(pch->thread);
2247 pch->thread = NULL;
Jassi Brarb3040e42010-05-23 20:28:19 -07002248
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002249 if (pch->cyclic)
2250 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2251
Jassi Brarb3040e42010-05-23 20:28:19 -07002252 spin_unlock_irqrestore(&pch->lock, flags);
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002253 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2254 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
Jassi Brarb3040e42010-05-23 20:28:19 -07002255}
2256
Ben Dooks5503aed2015-03-16 11:52:44 +00002257static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
2258 struct dma_pl330_desc *desc)
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002259{
2260 struct pl330_thread *thrd = pch->thread;
2261 struct pl330_dmac *pl330 = pch->dmac;
2262 void __iomem *regs = thrd->dmac->base;
2263 u32 val, addr;
2264
2265 pm_runtime_get_sync(pl330->ddma.dev);
2266 val = addr = 0;
2267 if (desc->rqcfg.src_inc) {
2268 val = readl(regs + SA(thrd->id));
2269 addr = desc->px.src_addr;
2270 } else {
2271 val = readl(regs + DA(thrd->id));
2272 addr = desc->px.dst_addr;
2273 }
2274 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2275 pm_runtime_put_autosuspend(pl330->ddma.dev);
2276 return val - addr;
2277}
2278
Jassi Brarb3040e42010-05-23 20:28:19 -07002279static enum dma_status
2280pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2281 struct dma_tx_state *txstate)
2282{
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002283 enum dma_status ret;
2284 unsigned long flags;
Stephen Barberd64e9a22016-08-18 17:59:59 -07002285 struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002286 struct dma_pl330_chan *pch = to_pchan(chan);
2287 unsigned int transferred, residual = 0;
2288
2289 ret = dma_cookie_status(chan, cookie, txstate);
2290
2291 if (!txstate)
2292 return ret;
2293
2294 if (ret == DMA_COMPLETE)
2295 goto out;
2296
2297 spin_lock_irqsave(&pch->lock, flags);
Hsin-Yu Chaoa40235a2016-08-23 17:16:55 +08002298 spin_lock(&pch->thread->dmac->lock);
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002299
2300 if (pch->thread->req_running != -1)
2301 running = pch->thread->req[pch->thread->req_running].desc;
2302
Stephen Barberd64e9a22016-08-18 17:59:59 -07002303 last_enq = pch->thread->req[pch->thread->lstenq].desc;
2304
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002305 /* Check in pending list */
2306 list_for_each_entry(desc, &pch->work_list, node) {
2307 if (desc->status == DONE)
2308 transferred = desc->bytes_requested;
2309 else if (running && desc == running)
2310 transferred =
2311 pl330_get_current_xferred_count(pch, desc);
Stephen Barberd64e9a22016-08-18 17:59:59 -07002312 else if (desc->status == BUSY)
2313 /*
2314 * Busy but not running means either just enqueued,
2315 * or finished and not yet marked done
2316 */
2317 if (desc == last_enq)
2318 transferred = 0;
2319 else
2320 transferred = desc->bytes_requested;
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002321 else
2322 transferred = 0;
2323 residual += desc->bytes_requested - transferred;
2324 if (desc->txd.cookie == cookie) {
Ben Dooks75967b72015-03-16 11:52:45 +00002325 switch (desc->status) {
2326 case DONE:
2327 ret = DMA_COMPLETE;
2328 break;
2329 case PREP:
2330 case BUSY:
2331 ret = DMA_IN_PROGRESS;
2332 break;
2333 default:
2334 WARN_ON(1);
2335 }
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002336 break;
2337 }
2338 if (desc->last)
2339 residual = 0;
2340 }
Hsin-Yu Chaoa40235a2016-08-23 17:16:55 +08002341 spin_unlock(&pch->thread->dmac->lock);
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002342 spin_unlock_irqrestore(&pch->lock, flags);
2343
2344out:
2345 dma_set_residue(txstate, residual);
2346
2347 return ret;
Jassi Brarb3040e42010-05-23 20:28:19 -07002348}
2349
2350static void pl330_issue_pending(struct dma_chan *chan)
2351{
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002352 struct dma_pl330_chan *pch = to_pchan(chan);
2353 unsigned long flags;
2354
2355 spin_lock_irqsave(&pch->lock, flags);
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002356 if (list_empty(&pch->work_list)) {
2357 /*
2358 * Warn on nothing pending. Empty submitted_list may
2359 * break our pm_runtime usage counter as it is
2360 * updated based on whether the work_list is empty.
2361 */
2362 WARN_ON(list_empty(&pch->submitted_list));
Marek Szyprowskid21814a2016-12-16 11:39:11 +01002363 pch->active = true;
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002364 pm_runtime_get_sync(pch->dmac->ddma.dev);
2365 }
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002366 list_splice_tail_init(&pch->submitted_list, &pch->work_list);
2367 spin_unlock_irqrestore(&pch->lock, flags);
2368
2369 pl330_tasklet((unsigned long)pch);
Jassi Brarb3040e42010-05-23 20:28:19 -07002370}
2371
2372/*
2373 * We returned the last one of the circular list of descriptor(s)
2374 * from prep_xxx, so the argument to submit corresponds to the last
2375 * descriptor of the list.
2376 */
2377static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2378{
2379 struct dma_pl330_desc *desc, *last = to_desc(tx);
2380 struct dma_pl330_chan *pch = to_pchan(tx->chan);
2381 dma_cookie_t cookie;
2382 unsigned long flags;
2383
2384 spin_lock_irqsave(&pch->lock, flags);
2385
2386 /* Assign cookies to all nodes */
Jassi Brarb3040e42010-05-23 20:28:19 -07002387 while (!list_empty(&last->node)) {
2388 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002389 if (pch->cyclic) {
2390 desc->txd.callback = last->txd.callback;
2391 desc->txd.callback_param = last->txd.callback_param;
2392 }
Krzysztof Kozlowski5dd90e52015-06-15 23:00:09 +09002393 desc->last = false;
Jassi Brarb3040e42010-05-23 20:28:19 -07002394
Russell King - ARM Linux884485e2012-03-06 22:34:46 +00002395 dma_cookie_assign(&desc->txd);
Jassi Brarb3040e42010-05-23 20:28:19 -07002396
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002397 list_move_tail(&desc->node, &pch->submitted_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002398 }
2399
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002400 last->last = true;
Russell King - ARM Linux884485e2012-03-06 22:34:46 +00002401 cookie = dma_cookie_assign(&last->txd);
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002402 list_add_tail(&last->node, &pch->submitted_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002403 spin_unlock_irqrestore(&pch->lock, flags);
2404
2405 return cookie;
2406}
2407
2408static inline void _init_desc(struct dma_pl330_desc *desc)
2409{
Jassi Brarb3040e42010-05-23 20:28:19 -07002410 desc->rqcfg.swap = SWAP_NO;
Lars-Peter Clausenf0564c72014-07-06 20:32:19 +02002411 desc->rqcfg.scctl = CCTRL0;
2412 desc->rqcfg.dcctl = CCTRL0;
Jassi Brarb3040e42010-05-23 20:28:19 -07002413 desc->txd.tx_submit = pl330_tx_submit;
2414
2415 INIT_LIST_HEAD(&desc->node);
2416}
2417
2418/* Returns the number of descriptors added to the DMAC pool */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002419static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
Jassi Brarb3040e42010-05-23 20:28:19 -07002420{
2421 struct dma_pl330_desc *desc;
2422 unsigned long flags;
2423 int i;
2424
Will Deacon0baf8f62013-12-02 18:01:30 +00002425 desc = kcalloc(count, sizeof(*desc), flg);
Jassi Brarb3040e42010-05-23 20:28:19 -07002426 if (!desc)
2427 return 0;
2428
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002429 spin_lock_irqsave(&pl330->pool_lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002430
2431 for (i = 0; i < count; i++) {
2432 _init_desc(&desc[i]);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002433 list_add_tail(&desc[i].node, &pl330->desc_pool);
Jassi Brarb3040e42010-05-23 20:28:19 -07002434 }
2435
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002436 spin_unlock_irqrestore(&pl330->pool_lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002437
2438 return count;
2439}
2440
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002441static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
Jassi Brarb3040e42010-05-23 20:28:19 -07002442{
2443 struct dma_pl330_desc *desc = NULL;
2444 unsigned long flags;
2445
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002446 spin_lock_irqsave(&pl330->pool_lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002447
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002448 if (!list_empty(&pl330->desc_pool)) {
2449 desc = list_entry(pl330->desc_pool.next,
Jassi Brarb3040e42010-05-23 20:28:19 -07002450 struct dma_pl330_desc, node);
2451
2452 list_del_init(&desc->node);
2453
2454 desc->status = PREP;
2455 desc->txd.callback = NULL;
2456 }
2457
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002458 spin_unlock_irqrestore(&pl330->pool_lock, flags);
Jassi Brarb3040e42010-05-23 20:28:19 -07002459
2460 return desc;
2461}
2462
2463static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
2464{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002465 struct pl330_dmac *pl330 = pch->dmac;
Thomas Abrahamcd072512011-10-24 11:43:11 +02002466 u8 *peri_id = pch->chan.private;
Jassi Brarb3040e42010-05-23 20:28:19 -07002467 struct dma_pl330_desc *desc;
2468
2469 /* Pluck one desc from the pool of DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002470 desc = pluck_desc(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07002471
2472 /* If the DMAC pool is empty, alloc new */
2473 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002474 if (!add_desc(pl330, GFP_ATOMIC, 1))
Jassi Brarb3040e42010-05-23 20:28:19 -07002475 return NULL;
2476
2477 /* Try again */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002478 desc = pluck_desc(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07002479 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002480 dev_err(pch->dmac->ddma.dev,
Jassi Brarb3040e42010-05-23 20:28:19 -07002481 "%s:%d ALERT!\n", __func__, __LINE__);
2482 return NULL;
2483 }
2484 }
2485
2486 /* Initialize the descriptor */
2487 desc->pchan = pch;
2488 desc->txd.cookie = 0;
2489 async_tx_ack(&desc->txd);
2490
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02002491 desc->peri = peri_id ? pch->chan.chan_id : 0;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002492 desc->rqcfg.pcfg = &pch->dmac->pcfg;
Jassi Brarb3040e42010-05-23 20:28:19 -07002493
2494 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
2495
2496 return desc;
2497}
2498
2499static inline void fill_px(struct pl330_xfer *px,
2500 dma_addr_t dst, dma_addr_t src, size_t len)
2501{
Jassi Brarb3040e42010-05-23 20:28:19 -07002502 px->bytes = len;
2503 px->dst_addr = dst;
2504 px->src_addr = src;
2505}
2506
2507static struct dma_pl330_desc *
2508__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
2509 dma_addr_t src, size_t len)
2510{
2511 struct dma_pl330_desc *desc = pl330_get_desc(pch);
2512
2513 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002514 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
Jassi Brarb3040e42010-05-23 20:28:19 -07002515 __func__, __LINE__);
2516 return NULL;
2517 }
2518
2519 /*
2520 * Ideally we should look out for reqs bigger than
2521 * those that can be programmed with 256 bytes of
2522 * MC buffer, but considering a req size is seldom
2523 * going to be word-unaligned and more than 200MB,
2524 * we take it easy.
2525 * Also, should the limit be reached we'd rather
2526 * have the platform increase the MC buffer size than
2527 * complicate this API driver.
2528 */
2529 fill_px(&desc->px, dst, src, len);
2530
2531 return desc;
2532}
2533
2534/* Call after fixing burst size */
2535static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
2536{
2537 struct dma_pl330_chan *pch = desc->pchan;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002538 struct pl330_dmac *pl330 = pch->dmac;
Jassi Brarb3040e42010-05-23 20:28:19 -07002539 int burst_len;
2540
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002541 burst_len = pl330->pcfg.data_bus_width / 8;
Jon Medhurstc27f9552014-11-07 18:05:18 +00002542 burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
Jassi Brarb3040e42010-05-23 20:28:19 -07002543 burst_len >>= desc->rqcfg.brst_size;
2544
2545 /* src/dst_burst_len can't be more than 16 */
2546 if (burst_len > 16)
2547 burst_len = 16;
2548
2549 while (burst_len > 1) {
2550 if (!(len % (burst_len << desc->rqcfg.brst_size)))
2551 break;
2552 burst_len--;
2553 }
2554
2555 return burst_len;
2556}
2557
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002558static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2559 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
Alexandre Bounine185ecb52012-03-08 15:35:13 -05002560 size_t period_len, enum dma_transfer_direction direction,
Laurent Pinchart31c1e5a2014-08-01 12:20:10 +02002561 unsigned long flags)
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002562{
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002563 struct dma_pl330_desc *desc = NULL, *first = NULL;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002564 struct dma_pl330_chan *pch = to_pchan(chan);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002565 struct pl330_dmac *pl330 = pch->dmac;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002566 unsigned int i;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002567 dma_addr_t dst;
2568 dma_addr_t src;
2569
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002570 if (len % period_len != 0)
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002571 return NULL;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002572
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002573 if (!is_slave_direction(direction)) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002574 dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002575 __func__, __LINE__);
2576 return NULL;
2577 }
2578
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002579 for (i = 0; i < len / period_len; i++) {
2580 desc = pl330_get_desc(pch);
2581 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002582 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002583 __func__, __LINE__);
2584
2585 if (!first)
2586 return NULL;
2587
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002588 spin_lock_irqsave(&pl330->pool_lock, flags);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002589
2590 while (!list_empty(&first->node)) {
2591 desc = list_entry(first->node.next,
2592 struct dma_pl330_desc, node);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002593 list_move_tail(&desc->node, &pl330->desc_pool);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002594 }
2595
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002596 list_move_tail(&first->node, &pl330->desc_pool);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002597
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002598 spin_unlock_irqrestore(&pl330->pool_lock, flags);
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002599
2600 return NULL;
2601 }
2602
2603 switch (direction) {
2604 case DMA_MEM_TO_DEV:
2605 desc->rqcfg.src_inc = 1;
2606 desc->rqcfg.dst_inc = 0;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002607 src = dma_addr;
2608 dst = pch->fifo_addr;
2609 break;
2610 case DMA_DEV_TO_MEM:
2611 desc->rqcfg.src_inc = 0;
2612 desc->rqcfg.dst_inc = 1;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002613 src = pch->fifo_addr;
2614 dst = dma_addr;
2615 break;
2616 default:
2617 break;
2618 }
2619
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02002620 desc->rqtype = direction;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002621 desc->rqcfg.brst_size = pch->burst_sz;
Caesar Wang0a18f9b2016-02-25 09:00:53 +08002622 desc->rqcfg.brst_len = 1;
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002623 desc->bytes_requested = period_len;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002624 fill_px(&desc->px, dst, src, period_len);
2625
2626 if (!first)
2627 first = desc;
2628 else
2629 list_add_tail(&desc->node, &first->node);
2630
2631 dma_addr += period_len;
2632 }
2633
2634 if (!desc)
2635 return NULL;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002636
2637 pch->cyclic = true;
Lars-Peter Clausenfc514462013-07-23 10:24:50 +02002638 desc->txd.flags = flags;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002639
2640 return &desc->txd;
2641}
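
/*
 * Illustrative example (hypothetical audio-style client; buf_dma,
 * period_bytes, period_elapsed_cb and substream are placeholders):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * period_bytes,
 *					 period_bytes, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = period_elapsed_cb;
 *		desc->callback_param = substream;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * The callback then fires once per period until the channel is terminated.
 */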
2642
Jassi Brarb3040e42010-05-23 20:28:19 -07002643static struct dma_async_tx_descriptor *
2644pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2645 dma_addr_t src, size_t len, unsigned long flags)
2646{
2647 struct dma_pl330_desc *desc;
2648 struct dma_pl330_chan *pch = to_pchan(chan);
Maninder Singhf5636852015-05-26 00:40:05 +05302649 struct pl330_dmac *pl330;
Jassi Brarb3040e42010-05-23 20:28:19 -07002650 int burst;
2651
Rob Herring4e0e6102011-07-25 16:05:04 -05002652 if (unlikely(!pch || !len))
Jassi Brarb3040e42010-05-23 20:28:19 -07002653 return NULL;
2654
Maninder Singhf5636852015-05-26 00:40:05 +05302655 pl330 = pch->dmac;
2656
Jassi Brarb3040e42010-05-23 20:28:19 -07002657 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
2658 if (!desc)
2659 return NULL;
2660
2661 desc->rqcfg.src_inc = 1;
2662 desc->rqcfg.dst_inc = 1;
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02002663 desc->rqtype = DMA_MEM_TO_MEM;
Jassi Brarb3040e42010-05-23 20:28:19 -07002664
2665 /* Select max possible burst size */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002666 burst = pl330->pcfg.data_bus_width / 8;
Jassi Brarb3040e42010-05-23 20:28:19 -07002667
Jon Medhurst137bd112014-11-07 18:05:17 +00002668 /*
2669 * Make sure we use a burst size that aligns with all the memcpy
2670 * parameters because our DMA programming algorithm doesn't cope with
2671 * transfers which straddle an entry in the DMA device's MFIFO.
2672 */
2673 while ((src | dst | len) & (burst - 1))
Jassi Brarb3040e42010-05-23 20:28:19 -07002674 burst /= 2;
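
/*
 * Worked example with made-up values: on a 64-bit bus, burst starts at 8
 * bytes; for src = 0x20001004, dst = 0x30000000 and len = 0x2000 the OR is
 * 0x30003004, so burst halves once to 4 and brst_size below ends up as 2
 * (i.e. 4-byte beats).
 */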
Jassi Brarb3040e42010-05-23 20:28:19 -07002675
2676 desc->rqcfg.brst_size = 0;
2677 while (burst != (1 << desc->rqcfg.brst_size))
2678 desc->rqcfg.brst_size++;
2679
2680 desc->rqcfg.brst_len = get_burst_len(desc, len);
2681
2682 /*
2683 * If burst size is smaller than bus width then make sure we only
2684 * transfer one at a time to avoid a burst straddling an MFIFO entry.
2685 */
2686 if (burst * 8 < pl330->pcfg.data_bus_width)
2687 desc->rqcfg.brst_len = 1;
Krzysztof Kozlowskiae128292015-06-15 17:25:16 +09002688 desc->bytes_requested = len;
Jassi Brarb3040e42010-05-23 20:28:19 -07002689
2690 desc->txd.flags = flags;
2691
2692 return &desc->txd;
2693}
2694
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002695static void __pl330_giveback_desc(struct pl330_dmac *pl330,
Chanho Park52a9d172013-08-09 20:11:33 +09002696 struct dma_pl330_desc *first)
2697{
2698 unsigned long flags;
2699 struct dma_pl330_desc *desc;
2700
2701 if (!first)
2702 return;
2703
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002704 spin_lock_irqsave(&pl330->pool_lock, flags);
Chanho Park52a9d172013-08-09 20:11:33 +09002705
2706 while (!list_empty(&first->node)) {
2707 desc = list_entry(first->node.next,
2708 struct dma_pl330_desc, node);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002709 list_move_tail(&desc->node, &pl330->desc_pool);
Chanho Park52a9d172013-08-09 20:11:33 +09002710 }
2711
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002712 list_move_tail(&first->node, &pl330->desc_pool);
Chanho Park52a9d172013-08-09 20:11:33 +09002713
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002714 spin_unlock_irqrestore(&pl330->pool_lock, flags);
Chanho Park52a9d172013-08-09 20:11:33 +09002715}
2716
Jassi Brarb3040e42010-05-23 20:28:19 -07002717static struct dma_async_tx_descriptor *
2718pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
Vinod Kouldb8196d2011-10-13 22:34:23 +05302719 unsigned int sg_len, enum dma_transfer_direction direction,
Alexandre Bounine185ecb52012-03-08 15:35:13 -05002720 unsigned long flg, void *context)
Jassi Brarb3040e42010-05-23 20:28:19 -07002721{
2722 struct dma_pl330_desc *first, *desc = NULL;
2723 struct dma_pl330_chan *pch = to_pchan(chan);
Jassi Brarb3040e42010-05-23 20:28:19 -07002724 struct scatterlist *sg;
Boojin Kim1b9bb712011-09-02 09:44:30 +09002725 int i;
Jassi Brarb3040e42010-05-23 20:28:19 -07002726 dma_addr_t addr;
2727
Thomas Abrahamcd072512011-10-24 11:43:11 +02002728 if (unlikely(!pch || !sgl || !sg_len))
Jassi Brarb3040e42010-05-23 20:28:19 -07002729 return NULL;
2730
Boojin Kim1b9bb712011-09-02 09:44:30 +09002731 addr = pch->fifo_addr;
Jassi Brarb3040e42010-05-23 20:28:19 -07002732
2733 first = NULL;
2734
2735 for_each_sg(sgl, sg, sg_len, i) {
2736
2737 desc = pl330_get_desc(pch);
2738 if (!desc) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002739 struct pl330_dmac *pl330 = pch->dmac;
Jassi Brarb3040e42010-05-23 20:28:19 -07002740
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002741 dev_err(pch->dmac->ddma.dev,
Jassi Brarb3040e42010-05-23 20:28:19 -07002742 "%s:%d Unable to fetch desc\n",
2743 __func__, __LINE__);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002744 __pl330_giveback_desc(pl330, first);
Jassi Brarb3040e42010-05-23 20:28:19 -07002745
2746 return NULL;
2747 }
2748
2749 if (!first)
2750 first = desc;
2751 else
2752 list_add_tail(&desc->node, &first->node);
2753
Vinod Kouldb8196d2011-10-13 22:34:23 +05302754 if (direction == DMA_MEM_TO_DEV) {
Jassi Brarb3040e42010-05-23 20:28:19 -07002755 desc->rqcfg.src_inc = 1;
2756 desc->rqcfg.dst_inc = 0;
2757 fill_px(&desc->px,
2758 addr, sg_dma_address(sg), sg_dma_len(sg));
2759 } else {
2760 desc->rqcfg.src_inc = 0;
2761 desc->rqcfg.dst_inc = 1;
2762 fill_px(&desc->px,
2763 sg_dma_address(sg), addr, sg_dma_len(sg));
2764 }
2765
Boojin Kim1b9bb712011-09-02 09:44:30 +09002766 desc->rqcfg.brst_size = pch->burst_sz;
Caesar Wang0a18f9b2016-02-25 09:00:53 +08002767 desc->rqcfg.brst_len = 1;
Lars-Peter Clausen9dc5a312014-07-06 20:32:30 +02002768 desc->rqtype = direction;
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002769 desc->bytes_requested = sg_dma_len(sg);
Jassi Brarb3040e42010-05-23 20:28:19 -07002770 }
2771
2772 /* Return the last desc in the chain */
2773 desc->txd.flags = flg;
2774 return &desc->txd;
2775}
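
/*
 * Illustrative sketch (dev, sgl, sg_len and chan are assumed client
 * variables): a slave client typically maps its scatterlist and then goes
 * through the generic helpers, which land in the callback above:
 *
 *	int nents;
 *	dma_cookie_t cookie;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */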
2776
2777static irqreturn_t pl330_irq_handler(int irq, void *data)
2778{
2779 if (pl330_update(data))
2780 return IRQ_HANDLED;
2781 else
2782 return IRQ_NONE;
2783}
2784
Lars-Peter Clausenca38ff12013-07-15 17:53:08 +02002785#define PL330_DMA_BUSWIDTHS \
2786 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
2787 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
2788 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
2789 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
2790 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
2791
Krzysztof Kozlowskib816ccc2014-11-18 12:17:56 +01002792/*
2793 * Runtime PM callbacks are provided by amba/bus.c driver.
2794 *
2795 * It is assumed here that IRQ safe runtime PM is chosen in probe and amba
2796 * bus driver will only disable/enable the clock in runtime PM callbacks.
2797 */
2798static int __maybe_unused pl330_suspend(struct device *dev)
2799{
2800 struct amba_device *pcdev = to_amba_device(dev);
2801
2802 pm_runtime_disable(dev);
2803
2804 if (!pm_runtime_status_suspended(dev)) {
2805 /* amba did not disable the clock */
2806 amba_pclk_disable(pcdev);
2807 }
2808 amba_pclk_unprepare(pcdev);
2809
2810 return 0;
2811}
2812
2813static int __maybe_unused pl330_resume(struct device *dev)
2814{
2815 struct amba_device *pcdev = to_amba_device(dev);
2816 int ret;
2817
2818 ret = amba_pclk_prepare(pcdev);
2819 if (ret)
2820 return ret;
2821
2822 if (!pm_runtime_status_suspended(dev))
2823 ret = amba_pclk_enable(pcdev);
2824
2825 pm_runtime_enable(dev);
2826
2827 return ret;
2828}
2829
2830static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
2831
Bill Pemberton463a1f82012-11-19 13:22:55 -05002832static int
Russell Kingaa25afa2011-02-19 15:55:00 +00002833pl330_probe(struct amba_device *adev, const struct amba_id *id)
Jassi Brarb3040e42010-05-23 20:28:19 -07002834{
2835 struct dma_pl330_platdata *pdat;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002836 struct pl330_config *pcfg;
2837 struct pl330_dmac *pl330;
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302838 struct dma_pl330_chan *pch, *_p;
Jassi Brarb3040e42010-05-23 20:28:19 -07002839 struct dma_device *pd;
2840 struct resource *res;
2841 int i, ret, irq;
Rob Herring4e0e6102011-07-25 16:05:04 -05002842 int num_chan;
Addy Ke271e1b862016-01-22 19:06:46 +08002843 struct device_node *np = adev->dev.of_node;
Jassi Brarb3040e42010-05-23 20:28:19 -07002844
Jingoo Hand4adcc02013-07-30 17:09:11 +09002845 pdat = dev_get_platdata(&adev->dev);
Jassi Brarb3040e42010-05-23 20:28:19 -07002846
Russell King64113012013-06-27 10:29:32 +01002847 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
2848 if (ret)
2849 return ret;
2850
Jassi Brarb3040e42010-05-23 20:28:19 -07002851 /* Allocate a new DMAC and its Channels */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002852 pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
Peter Griffinaef94fe2016-06-07 18:38:41 +01002853 if (!pl330)
Jassi Brarb3040e42010-05-23 20:28:19 -07002854 return -ENOMEM;
Jassi Brarb3040e42010-05-23 20:28:19 -07002855
Andrew Jacksoncee42392014-11-06 11:39:47 +00002856 pd = &pl330->ddma;
2857 pd->dev = &adev->dev;
2858
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002859 pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
Jassi Brarb3040e42010-05-23 20:28:19 -07002860
Addy Ke271e1b862016-01-22 19:06:46 +08002861 /* get quirks */
2862 for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
2863 if (of_property_read_bool(np, of_quirks[i].quirk))
2864 pl330->quirks |= of_quirks[i].id;
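
 /*
 * Note: of_quirks (defined earlier in the file) maps DT properties to
 * quirk flags; assuming the standard binding, a DMAC node that cannot use
 * DMAFLUSHP would carry a property such as "arm,pl330-broken-no-flushp"
 * to set PL330_QUIRK_BROKEN_NO_FLUSHP.
 */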
2865
Jassi Brarb3040e42010-05-23 20:28:19 -07002866 res = &adev->res;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002867 pl330->base = devm_ioremap_resource(&adev->dev, res);
2868 if (IS_ERR(pl330->base))
2869 return PTR_ERR(pl330->base);
Jassi Brarb3040e42010-05-23 20:28:19 -07002870
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002871 amba_set_drvdata(adev, pl330);
Boojin Kima2f52032011-09-02 09:44:29 +09002872
Dan Carpenter02808b42013-11-08 12:50:24 +03002873 for (i = 0; i < AMBA_NR_IRQS; i++) {
Michal Simeke98b3ca2013-09-30 08:50:48 +02002874 irq = adev->irq[i];
2875 if (irq) {
2876 ret = devm_request_irq(&adev->dev, irq,
2877 pl330_irq_handler, 0,
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002878 dev_name(&adev->dev), pl330);
Michal Simeke98b3ca2013-09-30 08:50:48 +02002879 if (ret)
2880 return ret;
2881 } else {
2882 break;
2883 }
2884 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002885
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002886 pcfg = &pl330->pcfg;
2887
2888 pcfg->periph_id = adev->periphid;
2889 ret = pl330_add(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07002890 if (ret)
Michal Simek173e8382013-09-04 16:40:17 +02002891 return ret;
Jassi Brarb3040e42010-05-23 20:28:19 -07002892
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002893 INIT_LIST_HEAD(&pl330->desc_pool);
2894 spin_lock_init(&pl330->pool_lock);
Jassi Brarb3040e42010-05-23 20:28:19 -07002895
2896 /* Create a descriptor pool of default size */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002897 if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
Jassi Brarb3040e42010-05-23 20:28:19 -07002898 dev_warn(&adev->dev, "unable to allocate desc\n");
2899
Jassi Brarb3040e42010-05-23 20:28:19 -07002900 INIT_LIST_HEAD(&pd->channels);
2901
2902 /* Initialize channel parameters */
Olof Johanssonc8473822012-04-08 16:26:19 -07002903 if (pdat)
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002904 num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
Olof Johanssonc8473822012-04-08 16:26:19 -07002905 else
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002906 num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
Olof Johanssonc8473822012-04-08 16:26:19 -07002907
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002908 pl330->num_peripherals = num_chan;
Lars-Peter Clausen70cbb162014-01-11 20:08:39 +01002909
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002910 pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
2911 if (!pl330->peripherals) {
Sachin Kamat61c6e752012-09-17 15:20:23 +05302912 ret = -ENOMEM;
Sachin Kamate4d43c12012-11-15 06:27:50 +00002913 goto probe_err2;
Sachin Kamat61c6e752012-09-17 15:20:23 +05302914 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002915
Rob Herring4e0e6102011-07-25 16:05:04 -05002916 for (i = 0; i < num_chan; i++) {
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002917 pch = &pl330->peripherals[i];
Thomas Abraham93ed5542011-10-24 11:43:31 +02002918 if (!adev->dev.of_node)
2919 pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
2920 else
2921 pch->chan.private = adev->dev.of_node;
Jassi Brarb3040e42010-05-23 20:28:19 -07002922
Lars-Peter Clausen04abf5d2014-01-11 20:08:38 +01002923 INIT_LIST_HEAD(&pch->submitted_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002924 INIT_LIST_HEAD(&pch->work_list);
Lars-Peter Clausen39ff8612013-08-27 20:34:05 +02002925 INIT_LIST_HEAD(&pch->completed_list);
Jassi Brarb3040e42010-05-23 20:28:19 -07002926 spin_lock_init(&pch->lock);
Lars-Peter Clausen65ad6062014-07-06 20:32:26 +02002927 pch->thread = NULL;
Jassi Brarb3040e42010-05-23 20:28:19 -07002928 pch->chan.device = pd;
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002929 pch->dmac = pl330;
Jassi Brarb3040e42010-05-23 20:28:19 -07002930
2931 /* Add the channel to the DMAC list */
Jassi Brarb3040e42010-05-23 20:28:19 -07002932 list_add_tail(&pch->chan.device_node, &pd->channels);
2933 }
2934
Thomas Abraham93ed5542011-10-24 11:43:31 +02002935 if (pdat) {
Thomas Abrahamcd072512011-10-24 11:43:11 +02002936 pd->cap_mask = pdat->cap_mask;
Thomas Abraham93ed5542011-10-24 11:43:31 +02002937 } else {
Thomas Abrahamcd072512011-10-24 11:43:11 +02002938 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002939 if (pcfg->num_peri) {
Thomas Abraham93ed5542011-10-24 11:43:31 +02002940 dma_cap_set(DMA_SLAVE, pd->cap_mask);
2941 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
Tushar Behera5557a412012-08-29 10:16:25 +05302942 dma_cap_set(DMA_PRIVATE, pd->cap_mask);
Thomas Abraham93ed5542011-10-24 11:43:31 +02002943 }
2944 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002945
2946 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
2947 pd->device_free_chan_resources = pl330_free_chan_resources;
2948 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
Boojin Kim42bc9cf2011-09-02 09:44:33 +09002949 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
Jassi Brarb3040e42010-05-23 20:28:19 -07002950 pd->device_tx_status = pl330_tx_status;
2951 pd->device_prep_slave_sg = pl330_prep_slave_sg;
Maxime Ripard740aa952014-11-17 14:42:29 +01002952 pd->device_config = pl330_config;
Robert Baldyga88987d22015-02-11 13:23:18 +01002953 pd->device_pause = pl330_pause;
Maxime Ripard740aa952014-11-17 14:42:29 +01002954 pd->device_terminate_all = pl330_terminate_all;
Jassi Brarb3040e42010-05-23 20:28:19 -07002955 pd->device_issue_pending = pl330_issue_pending;
Maxime Riparddcabe4562014-11-17 14:42:50 +01002956 pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
2957 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
2958 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
Robert Baldygaaee4d1f2015-02-11 13:23:17 +01002959 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
Shawn Lin86a8ce72016-01-22 19:06:51 +08002960 pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
2961 1 : PL330_MAX_BURST);
Jassi Brarb3040e42010-05-23 20:28:19 -07002962
2963 ret = dma_async_device_register(pd);
2964 if (ret) {
2965 dev_err(&adev->dev, "unable to register DMAC\n");
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302966 goto probe_err3;
2967 }
2968
2969 if (adev->dev.of_node) {
2970 ret = of_dma_controller_register(adev->dev.of_node,
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002971 of_dma_pl330_xlate, pl330);
Padmavathi Venna0b94c572013-03-05 14:55:31 +05302972 if (ret) {
2973 dev_err(&adev->dev,
2974 "unable to register DMA to the generic DT DMA helpers\n");
2975 }
Jassi Brarb3040e42010-05-23 20:28:19 -07002976 }
Lars-Peter Clausenb714b842013-11-25 16:07:46 +01002977
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002978 adev->dev.dma_parms = &pl330->dma_parms;
Lars-Peter Clausenb714b842013-11-25 16:07:46 +01002979
Vinod Kouldbaf6d82013-09-02 21:54:48 +05302980 /*
2981 * This is the limit for transfers with a buswidth of 1; larger
2982 * buswidths will have larger limits.
2983 */
2984 ret = dma_set_max_seg_size(&adev->dev, 1900800);
2985 if (ret)
2986 dev_err(&adev->dev, "unable to set the seg size\n");
2987
Jassi Brarb3040e42010-05-23 20:28:19 -07002988
Jassi Brarb3040e42010-05-23 20:28:19 -07002989 dev_info(&adev->dev,
Liviu Dudau1f0a5cb2014-11-06 17:20:12 +00002990 "Loaded driver for PL330 DMAC-%x\n", adev->periphid);
Jassi Brarb3040e42010-05-23 20:28:19 -07002991 dev_info(&adev->dev,
2992 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02002993 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
2994 pcfg->num_peri, pcfg->num_events);
Jassi Brarb3040e42010-05-23 20:28:19 -07002995
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01002996 pm_runtime_irq_safe(&adev->dev);
2997 pm_runtime_use_autosuspend(&adev->dev);
2998 pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
2999 pm_runtime_mark_last_busy(&adev->dev);
3000 pm_runtime_put_autosuspend(&adev->dev);
3001
Jassi Brarb3040e42010-05-23 20:28:19 -07003002 return 0;
Padmavathi Venna0b94c572013-03-05 14:55:31 +05303003probe_err3:
Padmavathi Venna0b94c572013-03-05 14:55:31 +05303004 /* Idle the DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02003005 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
Padmavathi Venna0b94c572013-03-05 14:55:31 +05303006 chan.device_node) {
3007
3008 /* Remove the channel */
3009 list_del(&pch->chan.device_node);
3010
3011 /* Flush the channel */
Krzysztof Kozlowski0f5ebab2014-09-29 14:42:20 +02003012 if (pch->thread) {
Maxime Ripard740aa952014-11-17 14:42:29 +01003013 pl330_terminate_all(&pch->chan);
Krzysztof Kozlowski0f5ebab2014-09-29 14:42:20 +02003014 pl330_free_chan_resources(&pch->chan);
3015 }
Padmavathi Venna0b94c572013-03-05 14:55:31 +05303016 }
Jassi Brarb3040e42010-05-23 20:28:19 -07003017probe_err2:
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02003018 pl330_del(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07003019
3020 return ret;
3021}
3022
Greg Kroah-Hartman4bf27b82012-12-21 15:09:59 -08003023static int pl330_remove(struct amba_device *adev)
Jassi Brarb3040e42010-05-23 20:28:19 -07003024{
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02003025 struct pl330_dmac *pl330 = amba_get_drvdata(adev);
Jassi Brarb3040e42010-05-23 20:28:19 -07003026 struct dma_pl330_chan *pch, *_p;
Vinod Koul46cf94d2016-07-05 10:02:16 +05303027 int i, irq;
Jassi Brarb3040e42010-05-23 20:28:19 -07003028
Krzysztof Kozlowskiae43b322014-11-14 09:48:57 +01003029 pm_runtime_get_noresume(pl330->ddma.dev);
3030
Padmavathi Venna0b94c572013-03-05 14:55:31 +05303031 if (adev->dev.of_node)
3032 of_dma_controller_free(adev->dev.of_node);
Padmavathi Venna421da892013-02-14 09:10:07 +05303033
Vinod Koul46cf94d2016-07-05 10:02:16 +05303034 for (i = 0; i < AMBA_NR_IRQS; i++) {
3035 irq = adev->irq[i];
3036 devm_free_irq(&adev->dev, irq, pl330);
3037 }
3038
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02003039 dma_async_device_unregister(&pl330->ddma);
Jassi Brarb3040e42010-05-23 20:28:19 -07003040
3041 /* Idle the DMAC */
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02003042 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
Jassi Brarb3040e42010-05-23 20:28:19 -07003043 chan.device_node) {
3044
3045 /* Remove the channel */
3046 list_del(&pch->chan.device_node);
3047
3048 /* Flush the channel */
Krzysztof Kozlowski6e4a2a82014-09-29 14:42:21 +02003049 if (pch->thread) {
Maxime Ripard740aa952014-11-17 14:42:29 +01003050 pl330_terminate_all(&pch->chan);
Krzysztof Kozlowski6e4a2a82014-09-29 14:42:21 +02003051 pl330_free_chan_resources(&pch->chan);
3052 }
Jassi Brarb3040e42010-05-23 20:28:19 -07003053 }
3054
Lars-Peter Clausenf6f24212014-07-06 20:32:29 +02003055 pl330_del(pl330);
Jassi Brarb3040e42010-05-23 20:28:19 -07003056
Jassi Brarb3040e42010-05-23 20:28:19 -07003057 return 0;
3058}
3059
3060static struct amba_id pl330_ids[] = {
3061 {
3062 .id = 0x00041330,
3063 .mask = 0x000fffff,
3064 },
3065 { 0, 0 },
3066};
3067
Dave Martine8fa5162011-10-05 15:15:20 +01003068MODULE_DEVICE_TABLE(amba, pl330_ids);
3069
Jassi Brarb3040e42010-05-23 20:28:19 -07003070static struct amba_driver pl330_driver = {
3071 .drv = {
3072 .owner = THIS_MODULE,
3073 .name = "dma-pl330",
Krzysztof Kozlowskib816ccc2014-11-18 12:17:56 +01003074 .pm = &pl330_pm,
Jassi Brarb3040e42010-05-23 20:28:19 -07003075 },
3076 .id_table = pl330_ids,
3077 .probe = pl330_probe,
3078 .remove = pl330_remove,
3079};
3080
viresh kumar9e5ed092012-03-15 10:40:38 +01003081module_amba_driver(pl330_driver);
Jassi Brarb3040e42010-05-23 20:28:19 -07003082
Jassi Brar046209f2014-12-05 19:07:49 +05303083MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
Jassi Brarb3040e42010-05-23 20:28:19 -07003084MODULE_DESCRIPTION("API Driver for PL330 DMAC");
3085MODULE_LICENSE("GPL");