/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#include "dmaengine.h"

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_srccachectrl {
	SCCTRL0,	/* Noncacheable and nonbufferable */
	SCCTRL1,	/* Bufferable only */
	SCCTRL2,	/* Cacheable, but do not allocate */
	SCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	SINVALID1,
	SINVALID2,
	SCCTRL6,	/* Cacheable write-through, allocate on reads only */
	SCCTRL7,	/* Cacheable write-back, allocate on reads only */
};

enum pl330_dstcachectrl {
	DCCTRL0,	/* Noncacheable and nonbufferable */
	DCCTRL1,	/* Bufferable only */
	DCCTRL2,	/* Cacheable, but do not allocate */
	DCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	DINVALID1,	/* AWCACHE = 0x1000 */
	DINVALID2,
	DCCTRL6,	/* Cacheable write-through, allocate on writes only */
	DCCTRL7,	/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

enum pl330_reqtype {
	MEMTOMEM,
	MEMTODEV,
	DEVTOMEM,
	DEVTODEV,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2
#define PCELL_ID		0xff0

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PCELL_ID_VAL		0xb105f00d

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
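
/*
 * Worked example (not driver code): a CCR built with brst_size = 2
 * (1 << 2 = 4-byte beats) and brst_len = 4 (stored as 3 in the 4-bit
 * len field) gives BRST_SIZE(ccr) = 4 and BRST_LEN(ccr) = 4, i.e.
 * 16 bytes per burst. Then:
 *
 *	BYTE_TO_BURST(4096, ccr) == 4096 / 4 / 4 == 256 bursts
 *	BURST_TO_BYTE(256, ccr)  == 256 * 4 * 4  == 4096 bytes
 */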

/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
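
/*
 * With PL330_DEBUG_MCGEN defined, each _emit_*() helper below dumps
 * the instruction it generates, prefixed by cmd_line, which tracks
 * the microcode bus address. A hypothetical trace for the start of a
 * mem-to-mem program would look like:
 *
 *	40004000:	DMAMOV CCR 0xd4035
 *	40004006:	DMAMOV SAR 0x20000000
 *	4000400c:	DMAMOV DAR 0x21000000
 *	40004012:	DMALP_0 255
 *
 * (addresses and operand values are made up for illustration).
 */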

/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
	u32	pcell_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device *dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Client can freely use it. */
	void	*client_data;
	/* PL330 core data, Client must not touch it. */
	void	*pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;
	/*
	 * If the DMAC has some reset mechanism, then the
	 * client may want to provide pointer to the method.
	 */
	void (*dmac_reset)(struct pl330_info *pi);
};

/**
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
	/*
	 * Pointer to next xfer in the list.
	 * The last xfer in the req must point to NULL.
	 */
	struct pl330_xfer *next;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum pl330_reqtype rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Unique token for this xfer, set by the client. */
	void *token;
	/* Callback to be called after xfer. */
	void (*xfer_cb)(void *token, enum pl330_op_err err);
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
};
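
/*
 * Illustrative sketch (assumed client code, not part of this file):
 * a single-xfer mem-to-mem request could be put together as
 *
 *	struct pl330_xfer x = {
 *		.src_addr = src_dma, .dst_addr = dst_dma,
 *		.bytes = len, .next = NULL,
 *	};
 *	struct pl330_reqcfg cfg = {
 *		.src_inc = 1, .dst_inc = 1,
 *		.brst_size = 2, .brst_len = 4,	- 4-byte beats, 4 per burst
 *		.scctl = SCCTRL0, .dcctl = DCCTRL0, .swap = SWAP_NO,
 *		.pcfg = &pi->pcfg,		- the DMAC's pl330_config
 *	};
 *	struct pl330_req r = {
 *		.rqtype = MEMTOMEM, .cfg = &cfg, .x = &x,
 *		.xfer_cb = my_done_cb, .token = my_ctx,
 *	};
 *
 * where src_dma/dst_dma/len and my_done_cb/my_ctx are hypothetical
 * client names.
 */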

/*
 * To know the status of the channel and DMAC, the client
 * provides a pointer to this structure. The PL330 core
 * fills it with current information.
 */
struct pl330_chanstatus {
	/*
	 * If the DMAC engine halted due to some error,
	 * the client should remove-add DMAC.
	 */
	bool dmac_halted;
	/*
	 * If channel is halted due to some error,
	 * the client should ABORT/FLUSH and START the channel.
	 */
	bool faulting;
	/* Location of last load */
	u32 src_addr;
	/* Location of last store */
	u32 dst_addr;
	/*
	 * Pointer to the currently active req, NULL if channel is
	 * inactive, even though the requests may be present.
	 */
	struct pl330_req *top_req;
	/* Pointer to req waiting second in the queue if any. */
	struct pl330_req *wait_req;
};

enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};

struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	/* Number of bytes taken to setup MC for the req */
	u32 mc_len;
	struct pl330_req *r;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

/* A DMAC */
struct pl330_dmac {
	spinlock_t		lock;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info	*pinfo;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	u32			mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
	if (r && r->xfer_cb)
		r->xfer_cb(r->token, err);
}

static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
		return true;
	else
		return false;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_id(struct pl330_info *pi, u32 off)
{
	void __iomem *regs = pi->base;
	u32 id = 0;

	id |= (readb(regs + off + 0x0) << 0);
	id |= (readb(regs + off + 0x4) << 8);
	id |= (readb(regs + off + 0x8) << 16);
	id |= (readb(regs + off + 0xc) << 24);

	return id;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
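
/*
 * For example, a periph_id of 0x00241330 (PART 0x330, DESIGNER 0x41,
 * revision field 0x2 in bits [23:20]) makes get_revision() return 2,
 * i.e. PERIPH_REV_R1P1, while (id & 0xfffff) still matches
 * PERIPH_ID_VAL.
 */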

static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}
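
/*
 * Illustrative sketch (not driver code): the _emit_*() helpers above
 * are composed into complete programs by the _setup_*() routines
 * below. A minimal single-burst mem-to-mem program would be built
 * roughly like this, with 'ccr', 'src', 'dst' and 'ev' assumed:
 *
 *	u8 buf[32];
 *	int off = 0;
 *
 *	off += _emit_MOV(0, &buf[off], CCR, ccr);
 *	off += _emit_MOV(0, &buf[off], SAR, src);
 *	off += _emit_MOV(0, &buf[off], DAR, dst);
 *	off += _emit_LD(0, &buf[off], ALWAYS);
 *	off += _emit_ST(0, &buf[off], ALWAYS);
 *	off += _emit_SEV(0, &buf[off], ev);
 *	off += _emit_END(0, &buf[off]);
 *
 * For real transfers, _setup_loops() wraps the LD/ST pair in
 * DMALP/DMALPEND loops instead of unrolling every burst.
 */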

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}

/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);
	req->mc_len = 0;

	thrd->req_running = -1;
}

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

		/* fall through */
	case PL330_STATE_FAULTING:
		_stop(thrd);

		/* fall through */
	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

		/* fall through */
	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->r->cfg->pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->r->rqtype) {
	case MEMTODEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DEVTOMEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case MEMTOMEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
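
/*
 * Worked example: for *bursts == 300 on a mem-to-mem request, the
 * first call picks lcnt1 = 256, lcnt0 = 300 / 256 = 1, cyc = 1,
 * emits nested DMALP/DMALPEND around one LD/ST pair and reports back
 * *bursts = 256 * 1 * 1 = 256; _setup_loops() below then calls in
 * again with the remaining 44 bursts, which fit in a single DMALP.
 */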

static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}

static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	do {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;

		pxs->x = x;
		off += _setup_xfer(dry_run, &buf[off], pxs);

		x = x->next;
	} while (x);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
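
/*
 * Example encoding: src_inc = dst_inc = 1, brst_size = 2 (4-byte
 * beats), brst_len = 4 and everything else zero yields
 *
 *	ccr = CC_SRCINC | CC_DSTINC
 *	    | (3 << CC_SRCBRSTLEN_SHFT) | (3 << CC_DSTBRSTLEN_SHFT)
 *	    | (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT)
 *	    = 0x000d4035
 */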

static inline bool _is_valid(u32 ccr)
{
	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == DINVALID1 || dcctl == DINVALID2
			|| scctl == SINVALID1 || scctl == SINVALID2)
		return false;
	else
		return true;
}
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(void *ch_id, struct pl330_req *r)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)
		return -EINVAL;

	pl330 = thrd->dmac;
	pi = pl330->pinfo;
	regs = pi->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
		dev_info(thrd->dmac->pinfo->dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, r->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel (r->cfg may be NULL, see below) */
	if (r->cfg) {
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;
	}

	/* Use last settings, if not provided */
	if (r->cfg)
		ccr = _prepare_ccr(r->cfg);
	else
		ccr = readl(regs + CC(thrd->id));

	/* If this req doesn't have valid xfer settings */
	if (!_is_valid(ccr)) {
		ret = -EINVAL;
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);
		goto xfer_exit;
	}

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	xs.ccr = ccr;
	xs.r = r;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pi->mcbufsz / 2) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Try increasing mcbufsz\n",
			__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
	thrd->req[idx].r = r;

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}
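
/*
 * Sketch of the expected client-side flow around pl330_submit_req()
 * (assumed caller code; pl330_request_channel() and pl330_chan_ctrl()
 * are defined further below, error handling elided):
 *
 *	void *ch = pl330_request_channel(pi);
 *
 *	r.xfer_cb = my_done_cb;		- hypothetical callback
 *	r.token = my_ctx;		- hypothetical cookie
 *	if (ch && !pl330_submit_req(ch, &r))
 *		pl330_chan_ctrl(ch, PL330_OP_START);
 */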

static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
	struct _pl330_req *rqdone;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e. which thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			rqdone = &thrd->req[active];
			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		struct pl330_req *r;

		rqdone = container_of(pl330->req_done.next,
					struct _pl330_req, rqd);

		list_del_init(&rqdone->rqd);

		/* Detach the req */
		r = rqdone->r;
		rqdone->r = NULL;

		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(r, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;
	int ev;

	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_info *pi, int i)
{
	return pi->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pi->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);

	pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
	pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].r = NULL;
	mark_free(thrd, 0);

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pi->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pi->mcbufsz / 2;
	thrd->req[1].r = NULL;
	mark_free(thrd, 1);
}
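
/*
 * With the default mcbufsz of 2 * MCODE_BUFF_PER_REQ (512 bytes), each
 * channel thread thus gets a 512-byte slice of the microcode buffer:
 * bytes 0..255 back req[0] and bytes 256..511 back req[1], matching
 * the mcbufsz / 2 budget checked in pl330_submit_req().
 */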

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
				chans * pi->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pi->dev,
				chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}

static int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	void __iomem *regs;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */
	if (pi->dmac_reset)
		pi->dmac_reset(pi);

	regs = pi->base;

	/* Check if we can handle this DMAC */
	if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
			get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
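
/*
 * Sketch of how a probe routine is expected to use pl330_add()
 * (assumed caller code; 'adev' and 'res' are hypothetical AMBA
 * device/resource names, error handling elided):
 *
 *	struct pl330_info *pi = ...;
 *	pi->dev = &adev->dev;
 *	pi->base = ioremap(res->start, resource_size(res));
 *	pi->mcbufsz = 0;	- take the 512-byte default
 *	ret = pl330_add(pi);
 */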

static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel((void *)thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void dmac_free_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
			pl330->mcode_cpu, pl330->mcode_bus);
}

static void pl330_del(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)
		return;

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	kfree(pl330);
	pi->pl330_data = NULL;
}
2205
Thomas Abraham3e2ec132011-10-24 11:43:02 +02002206/* forward declaration */
2207static struct amba_driver pl330_driver;
2208
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = NULL;
	unsigned long flags;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	/* pch will be unset if list was empty */
	if (!pch)
		return;

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

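/*
 * Cyclic descriptors are never freed on completion: their status is
 * reset to PREP and they are spliced back onto the channel's work_list,
 * so fill_queue() resubmits them and the transfer loops forever.
 */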
static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = NULL;
	unsigned long flags;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	/* pch will be unset if list was empty */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

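/*
 * Feed PREP descriptors from the work_list to the DMAC; -EAGAIN from
 * pl330_submit_req() means its request queue is full (or the DMAC is
 * dying), so we simply retry from the next tasklet run.
 */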
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

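/*
 * Per-channel bottom half: collects DONE descriptors off the work_list,
 * tops up the DMAC's request queue, kicks the channel thread, and then
 * completes (or, for cyclic transfers, recycles) the finished
 * descriptors outside the channel lock.
 */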
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			/* Cyclic descriptors are reused, so don't complete them */
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

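/*
 * Filter callback for dma_request_channel(): matches a channel against
 * the peripheral ID (or, on DT platforms, the controller node plus
 * channel index) supplied by the client. A minimal non-DT sketch, with
 * a hypothetical peri_id value:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 */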
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

#ifdef CONFIG_OF
	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}
#endif

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

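/*
 * DMA_SLAVE_CONFIG below stashes the peripheral FIFO address, burst
 * size and burst length on the channel for later prep_slave_sg/cyclic
 * calls. A minimal client-side sketch, with a hypothetical FIFO
 * address and widths:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */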
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command\n");
		return -ENXIO;
	}

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We return the last descriptor of the circular list from prep_xxx,
 * so the argument to submit corresponds to the last descriptor
 * of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->work_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

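/*
 * Descriptors live in a DMAC-wide pool (pdmac->desc_pool) guarded by
 * pool_lock: add_desc() grows the pool, pluck_desc() takes one out,
 * and completed descriptors are spliced back in by free_desc_list().
 */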
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

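/*
 * Worked example for get_burst_len() below, with a hypothetical config:
 * a 64-bit data bus and a 16-line MFIFO give 8 * 16 = 128; for
 * brst_size = 2 (4-byte beats) that is 128 >> 2 = 32, capped at the
 * PL330 maximum of 16, then decremented until it evenly divides len.
 */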
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

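/*
 * Prepare one period of a cyclic (ring) transfer, e.g. for audio; the
 * descriptor is recycled indefinitely by pl330_tasklet(). A client-side
 * sketch, with hypothetical buf/buf_len/period_len/period_done and
 * assuming the dmaengine API of this era:
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
 *				period_len, DMA_MEM_TO_DEV, NULL);
 *	txd->callback = period_done;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */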
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

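/*
 * One descriptor is built per scatterlist entry and chained off the
 * first; if an allocation fails mid-chain, every descriptor already
 * taken is returned to the pool before giving up.
 */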
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

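/*
 * Probe path: map the MMIO region, grab the "dma" clock and the first
 * IRQ, let pl330_add() validate and size the hardware, then register
 * one dma_chan per channel/peripheral with the dmaengine core.
 */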
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err2;
	}

	amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err3;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err4;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
	else
		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);

	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err5;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err5;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err5:
	pl330_del(pi);
probe_err4:
	free_irq(irq, pi);
probe_err3:
#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif
	clk_put(pdmac->clk);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

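/* Runtime PM just gates the DMAC's operation clock between uses. */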
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");