1/*
2 * drivers/dma/ste_dma40.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
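/*
 * Editor's note (illustrative example, not part of the original source):
 * each physical channel occupies a 2-bit command/status field; an even/odd
 * channel pair uses the same bit position but in different registers
 * (ACTIVE for even, ACTIVO for odd, see d40_channel_execute_command()).
 * For example, for channel 7:
 *
 *	D40_CHAN_POS(7)      = 2 * (7 / 2) = 6
 *	D40_CHAN_POS_MASK(7) = 0x3 << 6    = 0xc0
 */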
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
37/* Hardware designer of the block */
38#define D40_PERIPHID2_DESIGNER 0x8
39
40/**
41 * enum d40_command - The different commands and/or statuses.
42 *
43 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
44 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
45 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
46 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
47 */
48enum d40_command {
49 D40_DMA_STOP = 0,
50 D40_DMA_RUN = 1,
51 D40_DMA_SUSPEND_REQ = 2,
52 D40_DMA_SUSPENDED = 3
53};
54
55/**
56 * struct d40_lli_pool - Structure for keeping LLIs in memory
57 *
58 * @base: Pointer to memory area when the pre_alloc_lli's are not large
59 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
60 * pre_alloc_lli is used.
61 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
62 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
63 * one buffer to one buffer.
64 */
65struct d40_lli_pool {
66 void *base;
67 int size;
68 /* Space for dst and src, plus an extra for padding */
69 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
70};
71
72/**
73 * struct d40_desc - A descriptor is one DMA job.
74 *
75 * @lli_phy: LLI settings for physical channel. Both src and dst
76 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
77 * lli_len equals one.
78 * @lli_log: Same as above but for logical channels.
79 * @lli_pool: The pool with two entries pre-allocated.
80 * @lli_len: Number of llis of current descriptor.
81 * @lli_count: Number of transferred llis.
82 * @lli_tx_len: Max number of LLIs per transfer, there can be
83 * many transfers for one descriptor.
84 * @txd: DMA engine struct. Used among other things for communication
85 * during a transfer.
86 * @node: List entry.
87 * @dir: The transfer direction of this job.
88 * @is_in_client_list: true if the client owns this descriptor.
89 *
90 * This descriptor is used for both logical and physical transfers.
91 */
92
93struct d40_desc {
94 /* LLI physical */
95 struct d40_phy_lli_bidir lli_phy;
96 /* LLI logical */
97 struct d40_log_lli_bidir lli_log;
98
99 struct d40_lli_pool lli_pool;
100 int lli_len;
101 int lli_count;
102 u32 lli_tx_len;
103
104 struct dma_async_tx_descriptor txd;
105 struct list_head node;
106
107 enum dma_data_direction dir;
108 bool is_in_client_list;
109};
110
111/**
112 * struct d40_lcla_pool - LCLA pool settings and data.
113 *
114 * @base: The virtual address of LCLA.
115 * @phy: Physical base address of LCLA.
116 * @base_size: size of lcla.
117 * @lock: Lock to protect the content in this struct.
118 * @alloc_map: Mapping between physical channel and LCLA entries.
119 * @num_blocks: The number of entries of alloc_map. Equal to the
120 * number of physical channels.
121 */
122struct d40_lcla_pool {
123 void *base;
124 dma_addr_t phy;
125 resource_size_t base_size;
126 spinlock_t lock;
127 u32 *alloc_map;
128 int num_blocks;
129};
130
131/**
132 * struct d40_phy_res - struct for handling eventlines mapped to physical
133 * channels.
134 *
135 * @lock: A lock protecting this entity.
136 * @num: The physical channel number of this entity.
137 * @allocated_src: Bit mapped to show which src event lines are mapped to
138 * this physical channel. Can also be free or physically allocated.
139 * @allocated_dst: Same as for src but is dst.
140 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
141 * the event line number. allocated_src and allocated_dst cannot both be
142 * allocated to a physical channel, since the interrupt handler then has
143 * no way of figuring out which one the interrupt belongs to.
144 */
145struct d40_phy_res {
146 spinlock_t lock;
147 int num;
148 u32 allocated_src;
149 u32 allocated_dst;
150};
151
152struct d40_base;
153
154/**
155 * struct d40_chan - Struct that describes a channel.
156 *
157 * @lock: A spinlock to protect this struct.
158 * @log_num: The logical number, if any, of this channel.
159 * @completed: Starts with 1, after first interrupt it is set to dma engine's
160 * current cookie.
161 * @pending_tx: The number of pending transfers. Used between interrupt handler
162 * and tasklet.
163 * @busy: Set to true when transfer is ongoing on this channel.
164 * @phy_chan: Pointer to physical channel which this instance runs on.
165 * @chan: DMA engine handle.
166 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
167 * transfer and call client callback.
168 * @client: Client owned descriptor list.
169 * @active: Active descriptor.
170 * @queue: Queued jobs.
171 * @dma_cfg: The client configuration of this dma channel.
172 * @base: Pointer to the device instance struct.
173 * @src_def_cfg: Default cfg register setting for src.
174 * @dst_def_cfg: Default cfg register setting for dst.
175 * @log_def: Default logical channel settings.
176 * @lcla: Space for one dst src pair for logical channel transfers.
177 * @lcpa: Pointer to dst and src lcpa settings.
178 *
179 * This struct can either "be" a logical or a physical channel.
180 */
181struct d40_chan {
182 spinlock_t lock;
183 int log_num;
184 /* ID of the most recent completed transfer */
185 int completed;
186 int pending_tx;
187 bool busy;
188 struct d40_phy_res *phy_chan;
189 struct dma_chan chan;
190 struct tasklet_struct tasklet;
191 struct list_head client;
192 struct list_head active;
193 struct list_head queue;
194 struct stedma40_chan_cfg dma_cfg;
195 struct d40_base *base;
196 /* Default register configurations */
197 u32 src_def_cfg;
198 u32 dst_def_cfg;
199 struct d40_def_lcsp log_def;
200 struct d40_lcla_elem lcla;
201 struct d40_log_lli_full *lcpa;
202};
203
204/**
205 * struct d40_base - The big global struct, one for each probe'd instance.
206 *
207 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
208 * @execmd_lock: Lock for execute command usage since several channels share
209 * the same physical register.
210 * @dev: The device structure.
211 * @virtbase: The virtual base address of the DMA's registers.
212 * @clk: Pointer to the DMA clock structure.
213 * @phy_start: Physical memory start of the DMA registers.
214 * @phy_size: Size of the DMA register map.
215 * @irq: The IRQ number.
216 * @num_phy_chans: The number of physical channels. Read from HW. This
217 * is the number of available channels for this driver, not counting "Secure
218 * mode" allocated physical channels.
219 * @num_log_chans: The number of logical channels. Calculated from
220 * num_phy_chans.
221 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
222 * @dma_slave: dma_device channels that can only do slave transfers.
223 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
224 * @phy_chans: Room for all possible physical channels in system.
225 * @log_chans: Room for all possible logical channels in system.
226 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
227 * to log_chans entries.
228 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
229 * to phy_chans entries.
230 * @plat_data: Pointer to provided platform_data which is the driver
231 * configuration.
232 * @phy_res: Vector containing all physical channels.
233 * @lcla_pool: lcla pool settings and data.
234 * @lcpa_base: The virtual mapped address of LCPA.
235 * @phy_lcpa: The physical address of the LCPA.
236 * @lcpa_size: The size of the LCPA area.
237 * @desc_slab: cache for descriptors.
238 */
239struct d40_base {
240 spinlock_t interrupt_lock;
241 spinlock_t execmd_lock;
242 struct device *dev;
243 void __iomem *virtbase;
244 struct clk *clk;
245 phys_addr_t phy_start;
246 resource_size_t phy_size;
247 int irq;
248 int num_phy_chans;
249 int num_log_chans;
250 struct dma_device dma_both;
251 struct dma_device dma_slave;
252 struct dma_device dma_memcpy;
253 struct d40_chan *phy_chans;
254 struct d40_chan *log_chans;
255 struct d40_chan **lookup_log_chans;
256 struct d40_chan **lookup_phy_chans;
257 struct stedma40_platform_data *plat_data;
258 /* Physical half channels */
259 struct d40_phy_res *phy_res;
260 struct d40_lcla_pool lcla_pool;
261 void *lcpa_base;
262 dma_addr_t phy_lcpa;
263 resource_size_t lcpa_size;
264 struct kmem_cache *desc_slab;
265};
266
267/**
268 * struct d40_interrupt_lookup - lookup table for interrupt handler
269 *
270 * @src: Interrupt mask register.
271 * @clr: Interrupt clear register.
272 * @is_error: true if this is an error interrupt.
273 * @offset: start delta in the lookup_log_chans in d40_base. If it equals
274 * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
275 */
276struct d40_interrupt_lookup {
277 u32 src;
278 u32 clr;
279 bool is_error;
280 int offset;
281};
282
283/**
284 * struct d40_reg_val - simple lookup struct
285 *
286 * @reg: The register.
287 * @val: The value that belongs to the register in reg.
288 */
289struct d40_reg_val {
290 unsigned int reg;
291 unsigned int val;
292};
293
294static int d40_pool_lli_alloc(struct d40_desc *d40d,
295 int lli_len, bool is_log)
296{
297 u32 align;
298 void *base;
299
300 if (is_log)
301 align = sizeof(struct d40_log_lli);
302 else
303 align = sizeof(struct d40_phy_lli);
304
305 if (lli_len == 1) {
306 base = d40d->lli_pool.pre_alloc_lli;
307 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
308 d40d->lli_pool.base = NULL;
309 } else {
310 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
311
312 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
313 d40d->lli_pool.base = base;
314
315 if (d40d->lli_pool.base == NULL)
316 return -ENOMEM;
317 }
318
319 if (is_log) {
320 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
321 align);
322 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
323 align);
324 } else {
325 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
326 align);
327 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
328 align);
329
330 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
331 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
332 }
333
334 return 0;
335}
336
337static void d40_pool_lli_free(struct d40_desc *d40d)
338{
339 kfree(d40d->lli_pool.base);
340 d40d->lli_pool.base = NULL;
341 d40d->lli_pool.size = 0;
342 d40d->lli_log.src = NULL;
343 d40d->lli_log.dst = NULL;
344 d40d->lli_phy.src = NULL;
345 d40d->lli_phy.dst = NULL;
346 d40d->lli_phy.src_addr = 0;
347 d40d->lli_phy.dst_addr = 0;
348}
349
350static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
351 struct d40_desc *desc)
352{
353 dma_cookie_t cookie = d40c->chan.cookie;
354
355 if (++cookie < 0)
356 cookie = 1;
357
358 d40c->chan.cookie = cookie;
359 desc->txd.cookie = cookie;
360
361 return cookie;
362}
363
364static void d40_desc_remove(struct d40_desc *d40d)
365{
366 list_del(&d40d->node);
367}
368
369static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
370{
371 struct d40_desc *d;
372 struct d40_desc *_d;
373
374 if (!list_empty(&d40c->client)) {
375 list_for_each_entry_safe(d, _d, &d40c->client, node)
376 if (async_tx_test_ack(&d->txd)) {
377 d40_pool_lli_free(d);
378 d40_desc_remove(d);
379 break;
380 }
381 } else {
382 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
383 if (d != NULL) {
384 memset(d, 0, sizeof(struct d40_desc));
385 INIT_LIST_HEAD(&d->node);
386 }
387 }
388 return d;
389}
390
391static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
392{
393 kmem_cache_free(d40c->base->desc_slab, d40d);
394}
395
396static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
397{
398 list_add_tail(&desc->node, &d40c->active);
399}
400
401static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
402{
403 struct d40_desc *d;
404
405 if (list_empty(&d40c->active))
406 return NULL;
407
408 d = list_first_entry(&d40c->active,
409 struct d40_desc,
410 node);
411 return d;
412}
413
414static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
415{
416 list_add_tail(&desc->node, &d40c->queue);
417}
418
419static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
420{
421 struct d40_desc *d;
422
423 if (list_empty(&d40c->queue))
424 return NULL;
425
426 d = list_first_entry(&d40c->queue,
427 struct d40_desc,
428 node);
429 return d;
430}
431
432/* Support functions for logical channels */
433
434static int d40_lcla_id_get(struct d40_chan *d40c,
435 struct d40_lcla_pool *pool)
436{
437 int src_id = 0;
438 int dst_id = 0;
439 struct d40_log_lli *lcla_lidx_base =
440 pool->base + d40c->phy_chan->num * 1024;
441 int i;
442 int lli_per_log = d40c->base->plat_data->llis_per_log;
443
444 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
445 return 0;
446
447 if (pool->num_blocks > 32)
448 return -EINVAL;
449
450 spin_lock(&pool->lock);
451
452 for (i = 0; i < pool->num_blocks; i++) {
453 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
454 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
455 break;
456 }
457 }
458 src_id = i;
459 if (src_id >= pool->num_blocks)
460 goto err;
461
462 for (; i < pool->num_blocks; i++) {
463 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
464 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
465 break;
466 }
467 }
468
469 dst_id = i;
470 if (dst_id == src_id)
471 goto err;
472
473 d40c->lcla.src_id = src_id;
474 d40c->lcla.dst_id = dst_id;
475 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
476 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
477
478
479 spin_unlock(&pool->lock);
480 return 0;
481err:
482 spin_unlock(&pool->lock);
483 return -EINVAL;
484}
485
486static void d40_lcla_id_put(struct d40_chan *d40c,
487 struct d40_lcla_pool *pool,
488 int id)
489{
490 if (id < 0)
491 return;
492
493 d40c->lcla.src_id = -1;
494 d40c->lcla.dst_id = -1;
495
496 spin_lock(&pool->lock);
497 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
498 spin_unlock(&pool->lock);
499}
500
501static int d40_channel_execute_command(struct d40_chan *d40c,
502 enum d40_command command)
503{
504 int status, i;
505 void __iomem *active_reg;
506 int ret = 0;
507 unsigned long flags;
508
509 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
510
511 if (d40c->phy_chan->num % 2 == 0)
512 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
513 else
514 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
515
516 if (command == D40_DMA_SUSPEND_REQ) {
517 status = (readl(active_reg) &
518 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
519 D40_CHAN_POS(d40c->phy_chan->num);
520
521 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
522 goto done;
523 }
524
525 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
526
527 if (command == D40_DMA_SUSPEND_REQ) {
528
529 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
530 status = (readl(active_reg) &
531 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
532 D40_CHAN_POS(d40c->phy_chan->num);
533
534 cpu_relax();
535 /*
536 * Reduce the number of bus accesses while
537 * waiting for the DMA to suspend.
538 */
539 udelay(3);
540
541 if (status == D40_DMA_STOP ||
542 status == D40_DMA_SUSPENDED)
543 break;
544 }
545
546 if (i == D40_SUSPEND_MAX_IT) {
547 dev_err(&d40c->chan.dev->device,
548 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
549 __func__, d40c->phy_chan->num, d40c->log_num,
550 status);
551 dump_stack();
552 ret = -EBUSY;
553 }
554
555 }
556done:
557 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
558 return ret;
559}
560
561static void d40_term_all(struct d40_chan *d40c)
562{
563 struct d40_desc *d40d;
564
565 /* Release active descriptors */
566 while ((d40d = d40_first_active_get(d40c))) {
567 d40_desc_remove(d40d);
568
569 /* Return desc to free-list */
570 d40_desc_free(d40c, d40d);
571 }
572
573 /* Release queued descriptors waiting for transfer */
574 while ((d40d = d40_first_queued(d40c))) {
575 d40_desc_remove(d40d);
576
577 /* Return desc to free-list */
578 d40_desc_free(d40c, d40d);
579 }
580
581 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
582 d40c->lcla.src_id);
583 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
584 d40c->lcla.dst_id);
585
586 d40c->pending_tx = 0;
587 d40c->busy = false;
588}
589
590static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
591{
592 u32 val;
593 unsigned long flags;
594
595 if (do_enable)
596 val = D40_ACTIVATE_EVENTLINE;
597 else
598 val = D40_DEACTIVATE_EVENTLINE;
599
600 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
601
602 /* Enable event line connected to device (or memcpy) */
603 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
604 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
605 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
606
607 writel((val << D40_EVENTLINE_POS(event)) |
608 ~D40_EVENTLINE_MASK(event),
609 d40c->base->virtbase + D40_DREG_PCBASE +
610 d40c->phy_chan->num * D40_DREG_PCDELTA +
611 D40_CHAN_REG_SSLNK);
612 }
613 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
614 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
615
616 writel((val << D40_EVENTLINE_POS(event)) |
617 ~D40_EVENTLINE_MASK(event),
618 d40c->base->virtbase + D40_DREG_PCBASE +
619 d40c->phy_chan->num * D40_DREG_PCDELTA +
620 D40_CHAN_REG_SDLNK);
621 }
622
623 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
624}
625
626static u32 d40_chan_has_events(struct d40_chan *d40c)
627{
628 u32 val = 0;
629
630 /* If SSLNK or SDLNK is zero all events are disabled */
631 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
632 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
633 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
634 d40c->phy_chan->num * D40_DREG_PCDELTA +
635 D40_CHAN_REG_SSLNK);
636
637 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
638 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
639 d40c->phy_chan->num * D40_DREG_PCDELTA +
640 D40_CHAN_REG_SDLNK);
641 return val;
642}
643
644static void d40_config_enable_lidx(struct d40_chan *d40c)
645{
646 /* Set LIDX for lcla */
647 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
648 D40_SREG_ELEM_LOG_LIDX_MASK,
649 d40c->base->virtbase + D40_DREG_PCBASE +
650 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
651
652 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
653 D40_SREG_ELEM_LOG_LIDX_MASK,
654 d40c->base->virtbase + D40_DREG_PCBASE +
655 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
656}
657
658static int d40_config_write(struct d40_chan *d40c)
659{
660 u32 addr_base;
661 u32 var;
662 int res;
663
664 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
665 if (res)
666 return res;
667
668 /* Odd addresses are even addresses + 4 */
669 addr_base = (d40c->phy_chan->num % 2) * 4;
670 /* Setup channel mode to logical or physical */
671 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
672 D40_CHAN_POS(d40c->phy_chan->num);
673 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
674
675 /* Setup operational mode option register */
676 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
677 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
678
679 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
680
681 if (d40c->log_num != D40_PHY_CHAN) {
682 /* Set default config for CFG reg */
683 writel(d40c->src_def_cfg,
684 d40c->base->virtbase + D40_DREG_PCBASE +
685 d40c->phy_chan->num * D40_DREG_PCDELTA +
686 D40_CHAN_REG_SSCFG);
687 writel(d40c->dst_def_cfg,
688 d40c->base->virtbase + D40_DREG_PCBASE +
689 d40c->phy_chan->num * D40_DREG_PCDELTA +
690 D40_CHAN_REG_SDCFG);
691
692 d40_config_enable_lidx(d40c);
693 }
694 return res;
695}
696
697static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
698{
699
700 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
701 d40_phy_lli_write(d40c->base->virtbase,
702 d40c->phy_chan->num,
703 d40d->lli_phy.dst,
704 d40d->lli_phy.src);
705 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
706 struct d40_log_lli *src = d40d->lli_log.src;
707 struct d40_log_lli *dst = d40d->lli_log.dst;
708
709 src += d40d->lli_count;
710 dst += d40d->lli_count;
711 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
712 d40c->lcla.dst,
713 dst, src,
714 d40c->base->plat_data->llis_per_log);
715 }
716 d40d->lli_count += d40d->lli_tx_len;
717}
718
719static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
720{
721 struct d40_chan *d40c = container_of(tx->chan,
722 struct d40_chan,
723 chan);
724 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
725 unsigned long flags;
726
727 spin_lock_irqsave(&d40c->lock, flags);
728
729 tx->cookie = d40_assign_cookie(d40c, d40d);
730
731 d40_desc_queue(d40c, d40d);
732
733 spin_unlock_irqrestore(&d40c->lock, flags);
734
735 return tx->cookie;
736}
737
738static int d40_start(struct d40_chan *d40c)
739{
740 int err;
741
742 if (d40c->log_num != D40_PHY_CHAN) {
743 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
744 if (err)
745 return err;
746 d40_config_set_event(d40c, true);
747 }
748
749 err = d40_channel_execute_command(d40c, D40_DMA_RUN);
750
751 return err;
752}
753
754static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
755{
756 struct d40_desc *d40d;
757 int err;
758
759 /* Start queued jobs, if any */
760 d40d = d40_first_queued(d40c);
761
762 if (d40d != NULL) {
763 d40c->busy = true;
764
765 /* Remove from queue */
766 d40_desc_remove(d40d);
767
768 /* Add to active queue */
769 d40_desc_submit(d40c, d40d);
770
771 /* Initiate DMA job */
772 d40_desc_load(d40c, d40d);
773
774 /* Start dma job */
775 err = d40_start(d40c);
776
777 if (err)
778 return NULL;
779 }
780
781 return d40d;
782}
783
784/* called from interrupt context */
785static void dma_tc_handle(struct d40_chan *d40c)
786{
787 struct d40_desc *d40d;
788
789 if (!d40c->phy_chan)
790 return;
791
792 /* Get first active entry from list */
793 d40d = d40_first_active_get(d40c);
794
795 if (d40d == NULL)
796 return;
797
798 if (d40d->lli_count < d40d->lli_len) {
799
800 d40_desc_load(d40c, d40d);
801 /* Start dma job */
802 (void) d40_start(d40c);
803 return;
804 }
805
806 if (d40_queue_start(d40c) == NULL)
807 d40c->busy = false;
808
809 d40c->pending_tx++;
810 tasklet_schedule(&d40c->tasklet);
811
812}
813
814static void dma_tasklet(unsigned long data)
815{
816 struct d40_chan *d40c = (struct d40_chan *) data;
817 struct d40_desc *d40d_fin;
818 unsigned long flags;
819 dma_async_tx_callback callback;
820 void *callback_param;
821
822 spin_lock_irqsave(&d40c->lock, flags);
823
824 /* Get first active entry from list */
825 d40d_fin = d40_first_active_get(d40c);
826
827 if (d40d_fin == NULL)
828 goto err;
829
830 d40c->completed = d40d_fin->txd.cookie;
831
832 /*
833 * If a channel is being terminated, pending_tx is set to zero.
834 * This prevents any finished active jobs from being returned to the client.
835 */
836 if (d40c->pending_tx == 0) {
837 spin_unlock_irqrestore(&d40c->lock, flags);
838 return;
839 }
840
841 /* Callback to client */
842 callback = d40d_fin->txd.callback;
843 callback_param = d40d_fin->txd.callback_param;
844
845 if (async_tx_test_ack(&d40d_fin->txd)) {
846 d40_pool_lli_free(d40d_fin);
847 d40_desc_remove(d40d_fin);
848 /* Return desc to free-list */
849 d40_desc_free(d40c, d40d_fin);
850 } else {
851 if (!d40d_fin->is_in_client_list) {
852 d40_desc_remove(d40d_fin);
853 list_add_tail(&d40d_fin->node, &d40c->client);
854 d40d_fin->is_in_client_list = true;
855 }
856 }
857
858 d40c->pending_tx--;
859
860 if (d40c->pending_tx)
861 tasklet_schedule(&d40c->tasklet);
862
863 spin_unlock_irqrestore(&d40c->lock, flags);
864
865 if (callback)
866 callback(callback_param);
867
868 return;
869
870 err:
871 /* Rescue manoeuvre if receiving double interrupts */
872 if (d40c->pending_tx > 0)
873 d40c->pending_tx--;
874 spin_unlock_irqrestore(&d40c->lock, flags);
875}
876
877static irqreturn_t d40_handle_interrupt(int irq, void *data)
878{
879 static const struct d40_interrupt_lookup il[] = {
880 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
881 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
882 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
883 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
884 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
885 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
886 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
887 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
888 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
889 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
890 };
891
892 int i;
893 u32 regs[ARRAY_SIZE(il)];
894 u32 tmp;
895 u32 idx;
896 u32 row;
897 long chan = -1;
898 struct d40_chan *d40c;
899 unsigned long flags;
900 struct d40_base *base = data;
901
902 spin_lock_irqsave(&base->interrupt_lock, flags);
903
904 /* Read interrupt status of both logical and physical channels */
905 for (i = 0; i < ARRAY_SIZE(il); i++)
906 regs[i] = readl(base->virtbase + il[i].src);
907
908 for (;;) {
909
910 chan = find_next_bit((unsigned long *)regs,
911 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
912
913 /* No more set bits found? */
914 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
915 break;
916
917 row = chan / BITS_PER_LONG;
918 idx = chan & (BITS_PER_LONG - 1);
919
920 /* ACK interrupt */
921 tmp = readl(base->virtbase + il[row].clr);
922 tmp |= 1 << idx;
923 writel(tmp, base->virtbase + il[row].clr);
924
925 if (il[row].offset == D40_PHY_CHAN)
926 d40c = base->lookup_phy_chans[idx];
927 else
928 d40c = base->lookup_log_chans[il[row].offset + idx];
929 spin_lock(&d40c->lock);
930
931 if (!il[row].is_error)
932 dma_tc_handle(d40c);
933 else
934 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
935 __func__, chan, il[row].offset, idx);
936
937 spin_unlock(&d40c->lock);
938 }
939
940 spin_unlock_irqrestore(&base->interrupt_lock, flags);
941
942 return IRQ_HANDLED;
943}
944
945
946static int d40_validate_conf(struct d40_chan *d40c,
947 struct stedma40_chan_cfg *conf)
948{
949 int res = 0;
950 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
951 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
952 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
953 == STEDMA40_CHANNEL_IN_LOG_MODE;
954
955 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
956 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
957 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
958 __func__);
959 res = -EINVAL;
960 }
961
962 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
963 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
964 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
965 __func__);
966 res = -EINVAL;
967 }
968
969 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
970 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
971 dev_err(&d40c->chan.dev->device,
972 "[%s] No event line\n", __func__);
973 res = -EINVAL;
974 }
975
976 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
977 (src_event_group != dst_event_group)) {
978 dev_err(&d40c->chan.dev->device,
979 "[%s] Invalid event group\n", __func__);
980 res = -EINVAL;
981 }
982
983 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
984 /*
985 * DMAC HW supports it. Will be added to this driver,
986 * in case any dma client requires it.
987 */
988 dev_err(&d40c->chan.dev->device,
989 "[%s] periph to periph not supported\n",
990 __func__);
991 res = -EINVAL;
992 }
993
994 return res;
995}
996
997static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
998 int log_event_line, bool is_log)
999{
1000 unsigned long flags;
1001 spin_lock_irqsave(&phy->lock, flags);
1002 if (!is_log) {
1003 /* Physical interrupts are masked per physical full channel */
1004 if (phy->allocated_src == D40_ALLOC_FREE &&
1005 phy->allocated_dst == D40_ALLOC_FREE) {
1006 phy->allocated_dst = D40_ALLOC_PHY;
1007 phy->allocated_src = D40_ALLOC_PHY;
1008 goto found;
1009 } else
1010 goto not_found;
1011 }
1012
1013 /* Logical channel */
1014 if (is_src) {
1015 if (phy->allocated_src == D40_ALLOC_PHY)
1016 goto not_found;
1017
1018 if (phy->allocated_src == D40_ALLOC_FREE)
1019 phy->allocated_src = D40_ALLOC_LOG_FREE;
1020
1021 if (!(phy->allocated_src & (1 << log_event_line))) {
1022 phy->allocated_src |= 1 << log_event_line;
1023 goto found;
1024 } else
1025 goto not_found;
1026 } else {
1027 if (phy->allocated_dst == D40_ALLOC_PHY)
1028 goto not_found;
1029
1030 if (phy->allocated_dst == D40_ALLOC_FREE)
1031 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1032
1033 if (!(phy->allocated_dst & (1 << log_event_line))) {
1034 phy->allocated_dst |= 1 << log_event_line;
1035 goto found;
1036 } else
1037 goto not_found;
1038 }
1039
1040not_found:
1041 spin_unlock_irqrestore(&phy->lock, flags);
1042 return false;
1043found:
1044 spin_unlock_irqrestore(&phy->lock, flags);
1045 return true;
1046}
1047
1048static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1049 int log_event_line)
1050{
1051 unsigned long flags;
1052 bool is_free = false;
1053
1054 spin_lock_irqsave(&phy->lock, flags);
1055 if (!log_event_line) {
1056 /* Physical interrupts are masked per physical full channel */
1057 phy->allocated_dst = D40_ALLOC_FREE;
1058 phy->allocated_src = D40_ALLOC_FREE;
1059 is_free = true;
1060 goto out;
1061 }
1062
1063 /* Logical channel */
1064 if (is_src) {
1065 phy->allocated_src &= ~(1 << log_event_line);
1066 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1067 phy->allocated_src = D40_ALLOC_FREE;
1068 } else {
1069 phy->allocated_dst &= ~(1 << log_event_line);
1070 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1071 phy->allocated_dst = D40_ALLOC_FREE;
1072 }
1073
1074 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1075 D40_ALLOC_FREE);
1076
1077out:
1078 spin_unlock_irqrestore(&phy->lock, flags);
1079
1080 return is_free;
1081}
1082
1083static int d40_allocate_channel(struct d40_chan *d40c)
1084{
1085 int dev_type;
1086 int event_group;
1087 int event_line;
1088 struct d40_phy_res *phys;
1089 int i;
1090 int j;
1091 int log_num;
1092 bool is_src;
1093 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1094 == STEDMA40_CHANNEL_IN_LOG_MODE;
1095
1096
1097 phys = d40c->base->phy_res;
1098
1099 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1100 dev_type = d40c->dma_cfg.src_dev_type;
1101 log_num = 2 * dev_type;
1102 is_src = true;
1103 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1104 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1105 /* dst event lines are used for logical memcpy */
1106 dev_type = d40c->dma_cfg.dst_dev_type;
1107 log_num = 2 * dev_type + 1;
1108 is_src = false;
1109 } else
1110 return -EINVAL;
1111
1112 event_group = D40_TYPE_TO_GROUP(dev_type);
1113 event_line = D40_TYPE_TO_EVENT(dev_type);
1114
1115 if (!is_log) {
1116 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1117 /* Find physical half channel */
1118 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1119
1120 if (d40_alloc_mask_set(&phys[i], is_src,
1121 0, is_log))
1122 goto found_phy;
1123 }
1124 } else
1125 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1126 int phy_num = j + event_group * 2;
1127 for (i = phy_num; i < phy_num + 2; i++) {
1128 if (d40_alloc_mask_set(&phys[i], is_src,
1129 0, is_log))
1130 goto found_phy;
1131 }
1132 }
1133 return -EINVAL;
1134found_phy:
1135 d40c->phy_chan = &phys[i];
1136 d40c->log_num = D40_PHY_CHAN;
1137 goto out;
1138 }
1139 if (dev_type == -1)
1140 return -EINVAL;
1141
1142 /* Find logical channel */
1143 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1144 int phy_num = j + event_group * 2;
1145 /*
1146 * Spread logical channels across all available physical channels
1147 * rather than packing every logical channel onto the first
1148 * available phy channels.
1149 */
1150 if (is_src) {
1151 for (i = phy_num; i < phy_num + 2; i++) {
1152 if (d40_alloc_mask_set(&phys[i], is_src,
1153 event_line, is_log))
1154 goto found_log;
1155 }
1156 } else {
1157 for (i = phy_num + 1; i >= phy_num; i--) {
1158 if (d40_alloc_mask_set(&phys[i], is_src,
1159 event_line, is_log))
1160 goto found_log;
1161 }
1162 }
1163 }
1164 return -EINVAL;
1165
1166found_log:
1167 d40c->phy_chan = &phys[i];
1168 d40c->log_num = log_num;
1169out:
1170
1171 if (is_log)
1172 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1173 else
1174 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1175
1176 return 0;
1177
1178}
1179
1180static int d40_config_memcpy(struct d40_chan *d40c)
1181{
1182 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1183
1184 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1185 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1186 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1187 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1188 memcpy[d40c->chan.chan_id];
1189
1190 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1191 dma_has_cap(DMA_SLAVE, cap)) {
1192 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1193 } else {
1194 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1195 __func__);
1196 return -EINVAL;
1197 }
1198
1199 return 0;
1200}
1201
1202
1203static int d40_free_dma(struct d40_chan *d40c)
1204{
1205
1206 int res = 0;
1207 u32 event, dir;
1208 struct d40_phy_res *phy = d40c->phy_chan;
1209 bool is_src;
1210 struct d40_desc *d;
1211 struct d40_desc *_d;
1212
1213
1214 /* Terminate all queued and active transfers */
1215 d40_term_all(d40c);
1216
1217 /* Release client owned descriptors */
1218 if (!list_empty(&d40c->client))
1219 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1220 d40_pool_lli_free(d);
1221 d40_desc_remove(d);
1222 /* Return desc to free-list */
1223 d40_desc_free(d40c, d);
1224 }
1225
1226 if (phy == NULL) {
1227 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1228 __func__);
1229 return -EINVAL;
1230 }
1231
1232 if (phy->allocated_src == D40_ALLOC_FREE &&
1233 phy->allocated_dst == D40_ALLOC_FREE) {
1234 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1235 __func__);
1236 return -EINVAL;
1237 }
1238
1239
1240 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1241 if (res) {
1242 dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
1243 __func__);
1244 return res;
1245 }
1246
1247 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1248 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1249 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1250 dir = D40_CHAN_REG_SDLNK;
1251 is_src = false;
1252 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1253 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1254 dir = D40_CHAN_REG_SSLNK;
1255 is_src = true;
1256 } else {
1257 dev_err(&d40c->chan.dev->device,
1258 "[%s] Unknown direction\n", __func__);
1259 return -EINVAL;
1260 }
1261
1262 if (d40c->log_num != D40_PHY_CHAN) {
1263 /*
1264 * Release logical channel, deactivate the event line during
1265 * the time physical res is suspended.
1266 */
1267 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1268 D40_EVENTLINE_MASK(event),
1269 d40c->base->virtbase + D40_DREG_PCBASE +
1270 phy->num * D40_DREG_PCDELTA + dir);
1271
1272 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1273
1274 /*
1275 * Check if there are more logical allocations
1276 * on this phy channel.
1277 */
1278 if (!d40_alloc_mask_free(phy, is_src, event)) {
1279 /* Resume the other logical channels if any */
1280 if (d40_chan_has_events(d40c)) {
1281 res = d40_channel_execute_command(d40c,
1282 D40_DMA_RUN);
1283 if (res) {
1284 dev_err(&d40c->chan.dev->device,
1285 "[%s] Executing RUN command\n",
1286 __func__);
1287 return res;
1288 }
1289 }
1290 return 0;
1291 }
1292 } else
1293 d40_alloc_mask_free(phy, is_src, 0);
1294
1295 /* Release physical channel */
1296 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1297 if (res) {
1298 dev_err(&d40c->chan.dev->device,
1299 "[%s] Failed to stop channel\n", __func__);
1300 return res;
1301 }
1302 d40c->phy_chan = NULL;
1303 /* Invalidate channel type */
1304 d40c->dma_cfg.channel_type = 0;
1305 d40c->base->lookup_phy_chans[phy->num] = NULL;
1306
1307 return 0;
1308
1309
1310}
1311
1312static int d40_pause(struct dma_chan *chan)
1313{
1314 struct d40_chan *d40c =
1315 container_of(chan, struct d40_chan, chan);
1316 int res;
1317
1318 unsigned long flags;
1319
1320 spin_lock_irqsave(&d40c->lock, flags);
1321
1322 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1323 if (res == 0) {
1324 if (d40c->log_num != D40_PHY_CHAN) {
1325 d40_config_set_event(d40c, false);
1326 /* Resume the other logical channels if any */
1327 if (d40_chan_has_events(d40c))
1328 res = d40_channel_execute_command(d40c,
1329 D40_DMA_RUN);
1330 }
1331 }
1332
1333 spin_unlock_irqrestore(&d40c->lock, flags);
1334 return res;
1335}
1336
1337static bool d40_is_paused(struct d40_chan *d40c)
1338{
1339 bool is_paused = false;
1340 unsigned long flags;
1341 void __iomem *active_reg;
1342 u32 status;
1343 u32 event;
1344 int res;
1345
1346 spin_lock_irqsave(&d40c->lock, flags);
1347
1348 if (d40c->log_num == D40_PHY_CHAN) {
1349 if (d40c->phy_chan->num % 2 == 0)
1350 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1351 else
1352 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1353
1354 status = (readl(active_reg) &
1355 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1356 D40_CHAN_POS(d40c->phy_chan->num);
1357 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1358 is_paused = true;
1359
1360 goto _exit;
1361 }
1362
1363 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1364 if (res != 0)
1365 goto _exit;
1366
1367 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1368 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1369 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1370 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1371 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1372 else {
1373 dev_err(&d40c->chan.dev->device,
1374 "[%s] Unknown direction\n", __func__);
1375 goto _exit;
1376 }
1377 status = d40_chan_has_events(d40c);
1378 status = (status & D40_EVENTLINE_MASK(event)) >>
1379 D40_EVENTLINE_POS(event);
1380
1381 if (status != D40_DMA_RUN)
1382 is_paused = true;
1383
1384 /* Resume the other logical channels if any */
1385 if (d40_chan_has_events(d40c))
1386 res = d40_channel_execute_command(d40c,
1387 D40_DMA_RUN);
1388
1389_exit:
1390 spin_unlock_irqrestore(&d40c->lock, flags);
1391 return is_paused;
1392
1393}
1394
1395
1396static bool d40_tx_is_linked(struct d40_chan *d40c)
1397{
1398 bool is_link;
1399
1400 if (d40c->log_num != D40_PHY_CHAN)
1401 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1402 else
1403 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1404 d40c->phy_chan->num * D40_DREG_PCDELTA +
1405 D40_CHAN_REG_SDLNK) &
1406 D40_SREG_LNK_PHYS_LNK_MASK;
1407 return is_link;
1408}
1409
1410static u32 d40_residue(struct d40_chan *d40c)
1411{
1412 u32 num_elt;
1413
1414 if (d40c->log_num != D40_PHY_CHAN)
1415 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1416 >> D40_MEM_LCSP2_ECNT_POS;
1417 else
1418 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1419 d40c->phy_chan->num * D40_DREG_PCDELTA +
1420 D40_CHAN_REG_SDELT) &
1421 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1422 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1423}
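/*
 * Editor's note (illustrative example): d40_residue() converts the remaining
 * element count to bytes by shifting with the configured data width, so e.g.
 * 32 remaining elements with a 4-byte element width (data_width == 2) give
 * 32 * (1 << 2) = 128 bytes left to transfer.
 */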
1424
1425static int d40_resume(struct dma_chan *chan)
1426{
1427 struct d40_chan *d40c =
1428 container_of(chan, struct d40_chan, chan);
1429 int res = 0;
1430 unsigned long flags;
1431
1432 spin_lock_irqsave(&d40c->lock, flags);
1433
1434 if (d40c->log_num != D40_PHY_CHAN) {
1435 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1436 if (res)
1437 goto out;
1438
1439 /* If there are bytes left to transfer or the tx is linked, resume the job */
1440 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1441 d40_config_set_event(d40c, true);
1442 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1443 }
1444 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1445 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1446
1447out:
1448 spin_unlock_irqrestore(&d40c->lock, flags);
1449 return res;
1450}
1451
1452static u32 stedma40_residue(struct dma_chan *chan)
1453{
1454 struct d40_chan *d40c =
1455 container_of(chan, struct d40_chan, chan);
1456 u32 bytes_left;
1457 unsigned long flags;
1458
1459 spin_lock_irqsave(&d40c->lock, flags);
1460 bytes_left = d40_residue(d40c);
1461 spin_unlock_irqrestore(&d40c->lock, flags);
1462
1463 return bytes_left;
1464}
1465
1466/* Public DMA functions in addition to the DMA engine framework */
1467
1468int stedma40_set_psize(struct dma_chan *chan,
1469 int src_psize,
1470 int dst_psize)
1471{
1472 struct d40_chan *d40c =
1473 container_of(chan, struct d40_chan, chan);
1474 unsigned long flags;
1475
1476 spin_lock_irqsave(&d40c->lock, flags);
1477
1478 if (d40c->log_num != D40_PHY_CHAN) {
1479 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1480 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1481 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1482 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1483 goto out;
1484 }
1485
1486 if (src_psize == STEDMA40_PSIZE_PHY_1)
1487 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1488 else {
1489 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1490 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1491 D40_SREG_CFG_PSIZE_POS);
1492 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1493 }
1494
1495 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1496 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1497 else {
1498 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1499 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1500 D40_SREG_CFG_PSIZE_POS);
1501 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1502 }
1503out:
1504 spin_unlock_irqrestore(&d40c->lock, flags);
1505 return 0;
1506}
1507EXPORT_SYMBOL(stedma40_set_psize);
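/*
 * Usage sketch (editor's addition, illustrative only; "chan" is assumed to
 * have been obtained earlier via dma_request_channel()). A client on a
 * physical channel that wants single-element bursts on both src and dst
 * could call:
 *
 *	stedma40_set_psize(chan, STEDMA40_PSIZE_PHY_1, STEDMA40_PSIZE_PHY_1);
 */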
1508
1509struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1510 struct scatterlist *sgl_dst,
1511 struct scatterlist *sgl_src,
1512 unsigned int sgl_len,
1513 unsigned long flags)
1514{
1515 int res;
1516 struct d40_desc *d40d;
1517 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1518 chan);
1519 unsigned long flg;
1520
1521
1522 spin_lock_irqsave(&d40c->lock, flg);
1523 d40d = d40_desc_get(d40c);
1524
1525 if (d40d == NULL)
1526 goto err;
1527
1528 memset(d40d, 0, sizeof(struct d40_desc));
1529 d40d->lli_len = sgl_len;
1530 d40d->lli_tx_len = d40d->lli_len;
1531 d40d->txd.flags = flags;
1532
1533 if (d40c->log_num != D40_PHY_CHAN) {
1534 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1535 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1536
1537 if (sgl_len > 1)
1538 /*
1539 * Check if there is space available in lcla. If not,
1540 * split list into 1-length and run only in lcpa
1541 * space.
1542 */
1543 if (d40_lcla_id_get(d40c,
1544 &d40c->base->lcla_pool) != 0)
1545 d40d->lli_tx_len = 1;
1546
1547 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1548 dev_err(&d40c->chan.dev->device,
1549 "[%s] Out of memory\n", __func__);
1550 goto err;
1551 }
1552
1553 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1554 sgl_src,
1555 sgl_len,
1556 d40d->lli_log.src,
1557 d40c->log_def.lcsp1,
1558 d40c->dma_cfg.src_info.data_width,
1559 flags & DMA_PREP_INTERRUPT,
1560 d40d->lli_tx_len,
1561 d40c->base->plat_data->llis_per_log);
1562
1563 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1564 sgl_dst,
1565 sgl_len,
1566 d40d->lli_log.dst,
1567 d40c->log_def.lcsp3,
1568 d40c->dma_cfg.dst_info.data_width,
1569 flags & DMA_PREP_INTERRUPT,
1570 d40d->lli_tx_len,
1571 d40c->base->plat_data->llis_per_log);
1572
1573
1574 } else {
1575 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1576 dev_err(&d40c->chan.dev->device,
1577 "[%s] Out of memory\n", __func__);
1578 goto err;
1579 }
1580
1581 res = d40_phy_sg_to_lli(sgl_src,
1582 sgl_len,
1583 0,
1584 d40d->lli_phy.src,
1585 d40d->lli_phy.src_addr,
1586 d40c->src_def_cfg,
1587 d40c->dma_cfg.src_info.data_width,
1588 d40c->dma_cfg.src_info.psize,
1589 true);
1590
1591 if (res < 0)
1592 goto err;
1593
1594 res = d40_phy_sg_to_lli(sgl_dst,
1595 sgl_len,
1596 0,
1597 d40d->lli_phy.dst,
1598 d40d->lli_phy.dst_addr,
1599 d40c->dst_def_cfg,
1600 d40c->dma_cfg.dst_info.data_width,
1601 d40c->dma_cfg.dst_info.psize,
1602 true);
1603
1604 if (res < 0)
1605 goto err;
1606
1607 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1608 d40d->lli_pool.size, DMA_TO_DEVICE);
1609 }
1610
1611 dma_async_tx_descriptor_init(&d40d->txd, chan);
1612
1613 d40d->txd.tx_submit = d40_tx_submit;
1614
1615 spin_unlock_irqrestore(&d40c->lock, flg);
1616
1617 return &d40d->txd;
1618err:
1619 spin_unlock_irqrestore(&d40c->lock, flg);
1620 return NULL;
1621}
1622EXPORT_SYMBOL(stedma40_memcpy_sg);
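/*
 * Usage sketch (editor's addition, illustrative only): "chan", the DMA-mapped
 * scatterlists and the callback below are assumed to be set up by the client.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = stedma40_memcpy_sg(chan, dst_sg, src_sg, sg_len,
 *				DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = client_done_cb;
 *		tx->callback_param = client_ctx;
 *		tx->tx_submit(tx);
 *	}
 */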
1623
1624bool stedma40_filter(struct dma_chan *chan, void *data)
1625{
1626 struct stedma40_chan_cfg *info = data;
1627 struct d40_chan *d40c =
1628 container_of(chan, struct d40_chan, chan);
1629 int err;
1630
1631 if (data) {
1632 err = d40_validate_conf(d40c, info);
1633 if (!err)
1634 d40c->dma_cfg = *info;
1635 } else
1636 err = d40_config_memcpy(d40c);
1637
1638 return err == 0;
1639}
1640EXPORT_SYMBOL(stedma40_filter);
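/*
 * Usage sketch (editor's addition, illustrative only): a slave client passes
 * its stedma40_chan_cfg through this filter when requesting a channel; the
 * cfg field values below are placeholders.
 *
 *	dma_cap_mask_t mask;
 *	struct stedma40_chan_cfg cfg = { .dir = STEDMA40_PERIPH_TO_MEM, };
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */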
1641
1642/* DMA ENGINE functions */
1643static int d40_alloc_chan_resources(struct dma_chan *chan)
1644{
1645 int err;
1646 unsigned long flags;
1647 struct d40_chan *d40c =
1648 container_of(chan, struct d40_chan, chan);
1649 bool is_free_phy;
1650 spin_lock_irqsave(&d40c->lock, flags);
1651
1652 d40c->completed = chan->cookie = 1;
1653
1654 /*
1655 * If no dma configuration is set (channel_type == 0)
1656 * use default configuration (memcpy)
1657 */
1658 if (d40c->dma_cfg.channel_type == 0) {
1659 err = d40_config_memcpy(d40c);
1660 if (err)
1661 goto err_alloc;
1662 }
1663 is_free_phy = (d40c->phy_chan == NULL);
1664
1665 err = d40_allocate_channel(d40c);
1666 if (err) {
1667 dev_err(&d40c->chan.dev->device,
1668 "[%s] Failed to allocate channel\n", __func__);
1669 goto err_alloc;
1670 }
1671
1672 /* Fill in basic CFG register values */
1673 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1674 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1675
1676 if (d40c->log_num != D40_PHY_CHAN) {
1677 d40_log_cfg(&d40c->dma_cfg,
1678 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1679
1680 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1681 d40c->lcpa = d40c->base->lcpa_base +
1682 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1683 else
1684 d40c->lcpa = d40c->base->lcpa_base +
1685 d40c->dma_cfg.dst_dev_type *
1686 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1687 }
1688
1689 /*
1690 * Only write channel configuration to the DMA if the physical
1691 * resource is free. In case of multiple logical channels
1692 * on the same physical resource, only the first write is necessary.
1693 */
1694 if (is_free_phy) {
1695 err = d40_config_write(d40c);
1696 if (err) {
1697 dev_err(&d40c->chan.dev->device,
1698 "[%s] Failed to configure channel\n",
1699 __func__);
1700 }
1701 }
1702
1703 spin_unlock_irqrestore(&d40c->lock, flags);
1704 return 0;
1705
1706 err_config:
1707 (void) d40_free_dma(d40c);
1708 err_alloc:
1709 spin_unlock_irqrestore(&d40c->lock, flags);
1710 dev_err(&d40c->chan.dev->device,
1711 "[%s] Channel allocation failed\n", __func__);
1712 return -EINVAL;
1713}
1714
1715static void d40_free_chan_resources(struct dma_chan *chan)
1716{
1717 struct d40_chan *d40c =
1718 container_of(chan, struct d40_chan, chan);
1719 int err;
1720 unsigned long flags;
1721
1722 spin_lock_irqsave(&d40c->lock, flags);
1723
1724 err = d40_free_dma(d40c);
1725
1726 if (err)
1727 dev_err(&d40c->chan.dev->device,
1728 "[%s] Failed to free channel\n", __func__);
1729 spin_unlock_irqrestore(&d40c->lock, flags);
1730}
1731
1732static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1733 dma_addr_t dst,
1734 dma_addr_t src,
1735 size_t size,
1736 unsigned long flags)
1737{
1738 struct d40_desc *d40d;
1739 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1740 chan);
1741 unsigned long flg;
1742 int err = 0;
1743
1744 spin_lock_irqsave(&d40c->lock, flg);
1745 d40d = d40_desc_get(d40c);
1746
1747 if (d40d == NULL) {
1748 dev_err(&d40c->chan.dev->device,
1749 "[%s] Descriptor is NULL\n", __func__);
1750 goto err;
1751 }
1752
1753 memset(d40d, 0, sizeof(struct d40_desc));
1754
1755 d40d->txd.flags = flags;
1756
1757 dma_async_tx_descriptor_init(&d40d->txd, chan);
1758
1759 d40d->txd.tx_submit = d40_tx_submit;
1760
1761 if (d40c->log_num != D40_PHY_CHAN) {
1762
1763 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1764 dev_err(&d40c->chan.dev->device,
1765 "[%s] Out of memory\n", __func__);
1766 goto err;
1767 }
1768 d40d->lli_len = 1;
1769 d40d->lli_tx_len = 1;
1770
1771 d40_log_fill_lli(d40d->lli_log.src,
1772 src,
1773 size,
1774 0,
1775 d40c->log_def.lcsp1,
1776 d40c->dma_cfg.src_info.data_width,
1777 true, true);
1778
1779 d40_log_fill_lli(d40d->lli_log.dst,
1780 dst,
1781 size,
1782 0,
1783 d40c->log_def.lcsp3,
1784 d40c->dma_cfg.dst_info.data_width,
1785 true, true);
1786
1787 } else {
1788
1789 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1790 dev_err(&d40c->chan.dev->device,
1791 "[%s] Out of memory\n", __func__);
1792 goto err;
1793 }
1794
1795 err = d40_phy_fill_lli(d40d->lli_phy.src,
1796 src,
1797 size,
1798 d40c->dma_cfg.src_info.psize,
1799 0,
1800 d40c->src_def_cfg,
1801 true,
1802 d40c->dma_cfg.src_info.data_width,
1803 false);
1804 if (err)
1805 goto err_fill_lli;
1806
1807 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1808 dst,
1809 size,
1810 d40c->dma_cfg.dst_info.psize,
1811 0,
1812 d40c->dst_def_cfg,
1813 true,
1814 d40c->dma_cfg.dst_info.data_width,
1815 false);
1816
1817 if (err)
1818 goto err_fill_lli;
1819
1820 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1821 d40d->lli_pool.size, DMA_TO_DEVICE);
1822 }
1823
1824 spin_unlock_irqrestore(&d40c->lock, flg);
1825 return &d40d->txd;
1826
1827err_fill_lli:
1828 dev_err(&d40c->chan.dev->device,
1829 "[%s] Failed filling in PHY LLI\n", __func__);
1830 d40_pool_lli_free(d40d);
1831err:
1832 spin_unlock_irqrestore(&d40c->lock, flg);
1833 return NULL;
1834}
1835
1836static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1837 struct d40_chan *d40c,
1838 struct scatterlist *sgl,
1839 unsigned int sg_len,
1840 enum dma_data_direction direction,
1841 unsigned long flags)
1842{
1843 dma_addr_t dev_addr = 0;
1844 int total_size;
1845
1846 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1847 dev_err(&d40c->chan.dev->device,
1848 "[%s] Out of memory\n", __func__);
1849 return -ENOMEM;
1850 }
1851
1852 d40d->lli_len = sg_len;
1853 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1854 d40d->lli_tx_len = d40d->lli_len;
1855 else
1856 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1857
1858 if (sg_len > 1)
1859 /*
1860 * Check if there is space available in lcla.
1861 * If not, split list into 1-length and run only
1862 * in lcpa space.
1863 */
1864 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
1865 d40d->lli_tx_len = 1;
1866
1867 if (direction == DMA_FROM_DEVICE) {
1868 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1869 total_size = d40_log_sg_to_dev(&d40c->lcla,
1870 sgl, sg_len,
1871 &d40d->lli_log,
1872 &d40c->log_def,
1873 d40c->dma_cfg.src_info.data_width,
1874 d40c->dma_cfg.dst_info.data_width,
1875 direction,
1876 flags & DMA_PREP_INTERRUPT,
1877 dev_addr, d40d->lli_tx_len,
1878 d40c->base->plat_data->llis_per_log);
1879 } else if (direction == DMA_TO_DEVICE) {
1880 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1881 total_size = d40_log_sg_to_dev(&d40c->lcla,
1882 sgl, sg_len,
1883 &d40d->lli_log,
1884 &d40c->log_def,
1885 d40c->dma_cfg.src_info.data_width,
1886 d40c->dma_cfg.dst_info.data_width,
1887 direction,
1888 flags & DMA_PREP_INTERRUPT,
1889 dev_addr, d40d->lli_tx_len,
1890 d40c->base->plat_data->llis_per_log);
1891 } else
1892 return -EINVAL;
1893 if (total_size < 0)
1894 return -EINVAL;
1895
1896 return 0;
1897}
1898
1899static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1900 struct d40_chan *d40c,
1901 struct scatterlist *sgl,
1902 unsigned int sgl_len,
1903 enum dma_data_direction direction,
1904 unsigned long flags)
1905{
1906 dma_addr_t src_dev_addr;
1907 dma_addr_t dst_dev_addr;
1908 int res;
1909
1910 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1911 dev_err(&d40c->chan.dev->device,
1912 "[%s] Out of memory\n", __func__);
1913 return -ENOMEM;
1914 }
1915
1916 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001917 d40d->lli_tx_len = sgl_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02001918
1919 if (direction == DMA_FROM_DEVICE) {
1920 dst_dev_addr = 0;
1921 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1922 } else if (direction == DMA_TO_DEVICE) {
1923 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1924 src_dev_addr = 0;
1925 } else
1926 return -EINVAL;
1927
1928 res = d40_phy_sg_to_lli(sgl,
1929 sgl_len,
1930 src_dev_addr,
1931 d40d->lli_phy.src,
1932 d40d->lli_phy.src_addr,
1933 d40c->src_def_cfg,
1934 d40c->dma_cfg.src_info.data_width,
1935 d40c->dma_cfg.src_info.psize,
1936 true);
1937 if (res < 0)
1938 return res;
1939
1940 res = d40_phy_sg_to_lli(sgl,
1941 sgl_len,
1942 dst_dev_addr,
1943 d40d->lli_phy.dst,
1944 d40d->lli_phy.dst_addr,
1945 d40c->dst_def_cfg,
1946 d40c->dma_cfg.dst_info.data_width,
1947 d40c->dma_cfg.dst_info.psize,
1948 true);
1949 if (res < 0)
1950 return res;
1951
1952 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1953 d40d->lli_pool.size, DMA_TO_DEVICE);
1954 return 0;
1955}
1956
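/*
 * dmaengine device_prep_slave_sg entry point; dispatches to the logical or
 * physical variant depending on how the channel was allocated.
 *
 * Rough client-side usage through the generic dmaengine API (a sketch only;
 * the callback and cookie names are illustrative):
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						   DMA_TO_DEVICE,
 *						   DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = client_done;
 *		cookie = desc->tx_submit(desc);
 *		chan->device->device_issue_pending(chan);
 *	}
 */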
1957static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1958 struct scatterlist *sgl,
1959 unsigned int sg_len,
1960 enum dma_data_direction direction,
1961 unsigned long flags)
1962{
1963 struct d40_desc *d40d;
1964 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1965 chan);
1966 unsigned long flg;
1967 int err;
1968
1969 if (d40c->dma_cfg.pre_transfer)
1970 d40c->dma_cfg.pre_transfer(chan,
1971 d40c->dma_cfg.pre_transfer_data,
1972 sg_dma_len(sgl));
1973
1974 spin_lock_irqsave(&d40c->lock, flg);
1975 d40d = d40_desc_get(d40c);
1976 spin_unlock_irqrestore(&d40c->lock, flg);
1977
1978 if (d40d == NULL)
1979 return NULL;
1980
1981 memset(d40d, 0, sizeof(struct d40_desc));
1982
1983 if (d40c->log_num != D40_PHY_CHAN)
1984 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
1985 direction, flags);
1986 else
1987 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
1988 direction, flags);
1989 if (err) {
1990 dev_err(&d40c->chan.dev->device,
1991 "[%s] Failed to prepare %s slave sg job: %d\n",
1992 __func__,
1993 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
1994 return NULL;
1995 }
1996
1997 d40d->txd.flags = flags;
1998
1999 dma_async_tx_descriptor_init(&d40d->txd, chan);
2000
2001 d40d->txd.tx_submit = d40_tx_submit;
2002
2003 return &d40d->txd;
2004}
2005
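/*
 * Report the status of a cookie: DMA_PAUSED if the channel is paused,
 * otherwise whatever dma_async_is_complete() derives from the cookies,
 * with the remaining byte count taken from stedma40_residue().
 */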
2006static enum dma_status d40_tx_status(struct dma_chan *chan,
2007 dma_cookie_t cookie,
2008 struct dma_tx_state *txstate)
2009{
2010 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2011 dma_cookie_t last_used;
2012 dma_cookie_t last_complete;
2013 int ret;
2014
2015 last_complete = d40c->completed;
2016 last_used = chan->cookie;
2017
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002018 if (d40_is_paused(d40c))
2019 ret = DMA_PAUSED;
2020 else
2021 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002022
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002023 dma_set_tx_state(txstate, last_complete, last_used,
2024 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002025
2026 return ret;
2027}
2028
2029static void d40_issue_pending(struct dma_chan *chan)
2030{
2031 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2032 unsigned long flags;
2033
2034 spin_lock_irqsave(&d40c->lock, flags);
2035
2036 /* Busy means that pending jobs are already being processed */
2037 if (!d40c->busy)
2038 (void) d40_queue_start(d40c);
2039
2040 spin_unlock_irqrestore(&d40c->lock, flags);
2041}
2042
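/*
 * dmaengine device_control entry point: DMA_TERMINATE_ALL, DMA_PAUSE and
 * DMA_RESUME are handled; any other command returns -ENXIO.
 */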
Linus Walleij05827632010-05-17 16:30:42 -07002043static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2044 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002045{
2046 unsigned long flags;
2047 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2048
2049 switch (cmd) {
2050 case DMA_TERMINATE_ALL:
2051 spin_lock_irqsave(&d40c->lock, flags);
2052 d40_term_all(d40c);
2053 spin_unlock_irqrestore(&d40c->lock, flags);
2054 return 0;
2055 case DMA_PAUSE:
2056 return d40_pause(chan);
2057 case DMA_RESUME:
2058 return d40_resume(chan);
2059 }
2060
2061 /* Other commands are unimplemented */
2062 return -ENXIO;
2063}
2064
2065/* Initialization functions */
2066
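/*
 * Set up a range of struct d40_chan entries for one dma_device and link
 * them into its channel list.
 */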
2067static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2068 struct d40_chan *chans, int offset,
2069 int num_chans)
2070{
2071 int i = 0;
2072 struct d40_chan *d40c;
2073
2074 INIT_LIST_HEAD(&dma->channels);
2075
2076 for (i = offset; i < offset + num_chans; i++) {
2077 d40c = &chans[i];
2078 d40c->base = base;
2079 d40c->chan.device = dma;
2080
2081 /* Invalidate lcla element */
2082 d40c->lcla.src_id = -1;
2083 d40c->lcla.dst_id = -1;
2084
2085 spin_lock_init(&d40c->lock);
2086
2087 d40c->log_num = D40_PHY_CHAN;
2088
Linus Walleij8d318a52010-03-30 15:33:42 +02002089 INIT_LIST_HEAD(&d40c->active);
2090 INIT_LIST_HEAD(&d40c->queue);
2091 INIT_LIST_HEAD(&d40c->client);
2092
Linus Walleij8d318a52010-03-30 15:33:42 +02002093 tasklet_init(&d40c->tasklet, dma_tasklet,
2094 (unsigned long) d40c);
2095
2096 list_add_tail(&d40c->chan.device_node,
2097 &dma->channels);
2098 }
2099}
2100
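/*
 * Register the three dmaengine devices: slave-only logical channels,
 * memcpy-only logical channels, and the physical channels that can do both.
 */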
2101static int __init d40_dmaengine_init(struct d40_base *base,
2102 int num_reserved_chans)
2103{
2104	int err;
2105
2106 d40_chan_init(base, &base->dma_slave, base->log_chans,
2107 0, base->num_log_chans);
2108
2109 dma_cap_zero(base->dma_slave.cap_mask);
2110 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2111
2112 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2113 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2114 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2115 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2116 base->dma_slave.device_tx_status = d40_tx_status;
2117 base->dma_slave.device_issue_pending = d40_issue_pending;
2118 base->dma_slave.device_control = d40_control;
2119 base->dma_slave.dev = base->dev;
2120
2121 err = dma_async_device_register(&base->dma_slave);
2122
2123 if (err) {
2124 dev_err(base->dev,
2125 "[%s] Failed to register slave channels\n",
2126 __func__);
2127 goto failure1;
2128 }
2129
2130 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2131 base->num_log_chans, base->plat_data->memcpy_len);
2132
2133 dma_cap_zero(base->dma_memcpy.cap_mask);
2134 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2135
2136 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2137 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2138 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2139 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2140 base->dma_memcpy.device_tx_status = d40_tx_status;
2141 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2142 base->dma_memcpy.device_control = d40_control;
2143 base->dma_memcpy.dev = base->dev;
2144 /*
2145	 * This controller can only access addresses at even
2146	 * 32-bit boundaries, i.e. 2^2.
2147 */
2148 base->dma_memcpy.copy_align = 2;
2149
2150 err = dma_async_device_register(&base->dma_memcpy);
2151
2152 if (err) {
2153 dev_err(base->dev,
2154			"[%s] Failed to register memcpy only channels\n",
2155 __func__);
2156 goto failure2;
2157 }
2158
2159 d40_chan_init(base, &base->dma_both, base->phy_chans,
2160 0, num_reserved_chans);
2161
2162 dma_cap_zero(base->dma_both.cap_mask);
2163 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2164 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2165
2166 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2167 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2168 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2169 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2170 base->dma_both.device_tx_status = d40_tx_status;
2171 base->dma_both.device_issue_pending = d40_issue_pending;
2172 base->dma_both.device_control = d40_control;
2173 base->dma_both.dev = base->dev;
2174 base->dma_both.copy_align = 2;
2175 err = dma_async_device_register(&base->dma_both);
2176
2177 if (err) {
2178 dev_err(base->dev,
2179 "[%s] Failed to register logical and physical capable channels\n",
2180 __func__);
2181 goto failure3;
2182 }
2183 return 0;
2184failure3:
2185 dma_async_device_unregister(&base->dma_memcpy);
2186failure2:
2187 dma_async_device_unregister(&base->dma_slave);
2188failure1:
2189 return err;
2190}
2191
2192/* Initialization functions. */
2193
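/*
 * Mark secure-mode-only physical channels as occupied, count how many
 * channels are actually available and sanity check the PRTYP settings.
 */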
2194static int __init d40_phy_res_init(struct d40_base *base)
2195{
2196 int i;
2197 int num_phy_chans_avail = 0;
2198 u32 val[2];
2199 int odd_even_bit = -2;
2200
2201 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2202 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2203
2204 for (i = 0; i < base->num_phy_chans; i++) {
2205 base->phy_res[i].num = i;
2206 odd_even_bit += 2 * ((i % 2) == 0);
2207 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2208 /* Mark security only channels as occupied */
2209 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2210 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2211 } else {
2212 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2213 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2214 num_phy_chans_avail++;
2215 }
2216 spin_lock_init(&base->phy_res[i].lock);
2217 }
2218 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2219 num_phy_chans_avail, base->num_phy_chans);
2220
2221 /* Verify settings extended vs standard */
2222 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2223
2224 for (i = 0; i < base->num_phy_chans; i++) {
2225
2226 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2227 (val[0] & 0x3) != 1)
2228 dev_info(base->dev,
2229 "[%s] INFO: channel %d is misconfigured (%d)\n",
2230 __func__, i, val[0] & 0x3);
2231
2232 val[0] = val[0] >> 2;
2233 }
2234
2235 return num_phy_chans_avail;
2236}
2237
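/*
 * Verify the peripheral and PCell ID registers, read out the number of
 * physical channels and allocate struct d40_base together with the channel
 * arrays, lookup tables and the descriptor slab cache.
 */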
2238static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2239{
2240 static const struct d40_reg_val dma_id_regs[] = {
2241 /* Peripheral Id */
2242 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2243 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2244 /*
2245 * D40_DREG_PERIPHID2 Depends on HW revision:
2246 * MOP500/HREF ED has 0x0008,
2247 * ? has 0x0018,
2248 * HREF V1 has 0x0028
2249 */
2250 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2251
2252 /* PCell Id */
2253 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2254 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2255 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2256 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2257 };
2258 struct stedma40_platform_data *plat_data;
2259 struct clk *clk = NULL;
2260 void __iomem *virtbase = NULL;
2261 struct resource *res = NULL;
2262 struct d40_base *base = NULL;
2263 int num_log_chans = 0;
2264 int num_phy_chans;
2265 int i;
2266
2267 clk = clk_get(&pdev->dev, NULL);
2268
2269 if (IS_ERR(clk)) {
2270 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2271 __func__);
2272 goto failure;
2273 }
2274
2275 clk_enable(clk);
2276
2277 /* Get IO for DMAC base address */
2278 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2279 if (!res)
2280 goto failure;
2281
2282 if (request_mem_region(res->start, resource_size(res),
2283 D40_NAME " I/O base") == NULL)
2284 goto failure;
2285
2286 virtbase = ioremap(res->start, resource_size(res));
2287 if (!virtbase)
2288 goto failure;
2289
2290 /* HW version check */
2291 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2292 if (dma_id_regs[i].val !=
2293 readl(virtbase + dma_id_regs[i].reg)) {
2294 dev_err(&pdev->dev,
2295 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2296 __func__,
2297 dma_id_regs[i].val,
2298 dma_id_regs[i].reg,
2299 readl(virtbase + dma_id_regs[i].reg));
2300 goto failure;
2301 }
2302 }
2303
2304 i = readl(virtbase + D40_DREG_PERIPHID2);
2305
2306 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2307 dev_err(&pdev->dev,
2308 "[%s] Unknown designer! Got %x wanted %x\n",
2309 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2310 goto failure;
2311 }
2312
2313 /* The number of physical channels on this HW */
2314 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2315
2316 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2317 (i >> 4) & 0xf, res->start);
2318
2319 plat_data = pdev->dev.platform_data;
2320
2321 /* Count the number of logical channels in use */
2322 for (i = 0; i < plat_data->dev_len; i++)
2323 if (plat_data->dev_rx[i] != 0)
2324 num_log_chans++;
2325
2326 for (i = 0; i < plat_data->dev_len; i++)
2327 if (plat_data->dev_tx[i] != 0)
2328 num_log_chans++;
2329
2330 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2331 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2332 sizeof(struct d40_chan), GFP_KERNEL);
2333
2334 if (base == NULL) {
2335 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2336 goto failure;
2337 }
2338
2339 base->clk = clk;
2340 base->num_phy_chans = num_phy_chans;
2341 base->num_log_chans = num_log_chans;
2342 base->phy_start = res->start;
2343 base->phy_size = resource_size(res);
2344 base->virtbase = virtbase;
2345 base->plat_data = plat_data;
2346 base->dev = &pdev->dev;
2347 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2348 base->log_chans = &base->phy_chans[num_phy_chans];
2349
2350 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2351 GFP_KERNEL);
2352 if (!base->phy_res)
2353 goto failure;
2354
2355 base->lookup_phy_chans = kzalloc(num_phy_chans *
2356 sizeof(struct d40_chan *),
2357 GFP_KERNEL);
2358 if (!base->lookup_phy_chans)
2359 goto failure;
2360
2361 if (num_log_chans + plat_data->memcpy_len) {
2362 /*
2363		 * The max number of logical channels is the number of event
2364		 * lines for all src and dst devices.
2365 */
2366 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2367 sizeof(struct d40_chan *),
2368 GFP_KERNEL);
2369 if (!base->lookup_log_chans)
2370 goto failure;
2371 }
2372 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2373 GFP_KERNEL);
2374 if (!base->lcla_pool.alloc_map)
2375 goto failure;
2376
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002377 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2378 0, SLAB_HWCACHE_ALIGN,
2379 NULL);
2380 if (base->desc_slab == NULL)
2381 goto failure;
2382
Linus Walleij8d318a52010-03-30 15:33:42 +02002383 return base;
2384
2385failure:
2386 if (clk) {
2387 clk_disable(clk);
2388 clk_put(clk);
2389 }
2390 if (virtbase)
2391 iounmap(virtbase);
2392 if (res)
2393 release_mem_region(res->start,
2394 resource_size(res));
2397
2398 if (base) {
2399 kfree(base->lcla_pool.alloc_map);
2400 kfree(base->lookup_log_chans);
2401 kfree(base->lookup_phy_chans);
2402 kfree(base->phy_res);
2403 kfree(base);
2404 }
2405
2406 return NULL;
2407}
2408
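/*
 * Write the initial register setup: clock all parts of the block, unmask
 * and clear the logical channel interrupts, and put every non-secure
 * physical channel into physical mode with its interrupt enabled.
 */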
2409static void __init d40_hw_init(struct d40_base *base)
2410{
2411
2412 static const struct d40_reg_val dma_init_reg[] = {
2413 /* Clock every part of the DMA block from start */
2414 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2415
2416 /* Interrupts on all logical channels */
2417 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2418 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2419 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2420 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2421 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2422 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2423 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2424 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2425 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2426 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2427 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2428 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2429 };
2430 int i;
2431 u32 prmseo[2] = {0, 0};
2432 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2433 u32 pcmis = 0;
2434 u32 pcicr = 0;
2435
2436 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2437 writel(dma_init_reg[i].val,
2438 base->virtbase + dma_init_reg[i].reg);
2439
2440 /* Configure all our dma channels to default settings */
2441 for (i = 0; i < base->num_phy_chans; i++) {
2442
2443 activeo[i % 2] = activeo[i % 2] << 2;
2444
2445 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2446 == D40_ALLOC_PHY) {
2447 activeo[i % 2] |= 3;
2448 continue;
2449 }
2450
2451		/* Enable interrupt for this channel */
2452 pcmis = (pcmis << 1) | 1;
2453
2454		/* Clear interrupt for this channel */
2455 pcicr = (pcicr << 1) | 1;
2456
2457 /* Set channel to physical mode */
2458 prmseo[i % 2] = prmseo[i % 2] << 2;
2459 prmseo[i % 2] |= 1;
2460
2461 }
2462
2463 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2464 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2465 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2466 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2467
2468 /* Write which interrupt to enable */
2469 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2470
2471 /* Write which interrupt to clear */
2472 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2473
2474}
2475
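/*
 * Platform driver probe: detect the hardware, map the LCPA and LCLA
 * regions, request the interrupt and register the dmaengine devices.
 */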
2476static int __init d40_probe(struct platform_device *pdev)
2477{
2478 int err;
2479 int ret = -ENOENT;
2480 struct d40_base *base;
2481 struct resource *res = NULL;
2482 int num_reserved_chans;
2483 u32 val;
2484
2485 base = d40_hw_detect_init(pdev);
2486
2487 if (!base)
2488 goto failure;
2489
2490 num_reserved_chans = d40_phy_res_init(base);
2491
2492 platform_set_drvdata(pdev, base);
2493
2494 spin_lock_init(&base->interrupt_lock);
2495 spin_lock_init(&base->execmd_lock);
2496
2497 /* Get IO for logical channel parameter address */
2498 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2499 if (!res) {
2500 ret = -ENOENT;
2501 dev_err(&pdev->dev,
2502 "[%s] No \"lcpa\" memory resource\n",
2503 __func__);
2504 goto failure;
2505 }
2506 base->lcpa_size = resource_size(res);
2507 base->phy_lcpa = res->start;
2508
2509 if (request_mem_region(res->start, resource_size(res),
2510 D40_NAME " I/O lcpa") == NULL) {
2511 ret = -EBUSY;
2512 dev_err(&pdev->dev,
2513 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2514 __func__, res->start, res->end);
2515 goto failure;
2516 }
2517
2518	/* The LCPA is kept in ESRAM; warn if the DMAC already points elsewhere. */
2519 val = readl(base->virtbase + D40_DREG_LCPA);
2520 if (res->start != val && val != 0) {
2521 dev_warn(&pdev->dev,
2522 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2523 __func__, val, res->start);
2524 } else
2525 writel(res->start, base->virtbase + D40_DREG_LCPA);
2526
2527 base->lcpa_base = ioremap(res->start, resource_size(res));
2528 if (!base->lcpa_base) {
2529 ret = -ENOMEM;
2530 dev_err(&pdev->dev,
2531 "[%s] Failed to ioremap LCPA region\n",
2532 __func__);
2533 goto failure;
2534 }
2535 /* Get IO for logical channel link address */
2536 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2537 if (!res) {
2538 ret = -ENOENT;
2539 dev_err(&pdev->dev,
2540 "[%s] No \"lcla\" resource defined\n",
2541 __func__);
2542 goto failure;
2543 }
2544
2545 base->lcla_pool.base_size = resource_size(res);
2546 base->lcla_pool.phy = res->start;
2547
2548 if (request_mem_region(res->start, resource_size(res),
2549 D40_NAME " I/O lcla") == NULL) {
2550 ret = -EBUSY;
2551 dev_err(&pdev->dev,
2552 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2553 __func__, res->start, res->end);
2554 goto failure;
2555 }
2556 val = readl(base->virtbase + D40_DREG_LCLA);
2557 if (res->start != val && val != 0) {
2558 dev_warn(&pdev->dev,
2559 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2560 __func__, val, res->start);
2561 } else
2562 writel(res->start, base->virtbase + D40_DREG_LCLA);
2563
2564 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2565 if (!base->lcla_pool.base) {
2566 ret = -ENOMEM;
2567 dev_err(&pdev->dev,
2568 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2569 __func__, res->start, res->end);
2570 goto failure;
2571 }
2572
2573 spin_lock_init(&base->lcla_pool.lock);
2574
2575 base->lcla_pool.num_blocks = base->num_phy_chans;
2576
2577 base->irq = platform_get_irq(pdev, 0);
2578
2579 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2580
2581 if (ret) {
2582 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2583 goto failure;
2584 }
2585
2586 err = d40_dmaengine_init(base, num_reserved_chans);
2587 if (err)
2588 goto failure;
2589
2590 d40_hw_init(base);
2591
2592 dev_info(base->dev, "initialized\n");
2593 return 0;
2594
2595failure:
2596 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002597 if (base->desc_slab)
2598 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002599 if (base->virtbase)
2600 iounmap(base->virtbase);
2601 if (base->lcla_pool.phy)
2602 release_mem_region(base->lcla_pool.phy,
2603 base->lcla_pool.base_size);
2604 if (base->phy_lcpa)
2605 release_mem_region(base->phy_lcpa,
2606 base->lcpa_size);
2607 if (base->phy_start)
2608 release_mem_region(base->phy_start,
2609 base->phy_size);
2610 if (base->clk) {
2611 clk_disable(base->clk);
2612 clk_put(base->clk);
2613 }
2614
2615 kfree(base->lcla_pool.alloc_map);
2616 kfree(base->lookup_log_chans);
2617 kfree(base->lookup_phy_chans);
2618 kfree(base->phy_res);
2619 kfree(base);
2620 }
2621
2622 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2623 return ret;
2624}
2625
2626static struct platform_driver d40_driver = {
2627 .driver = {
2628 .owner = THIS_MODULE,
2629 .name = D40_NAME,
2630 },
2631};
2632
2633int __init stedma40_init(void)
2634{
2635 return platform_driver_probe(&d40_driver, d40_probe);
2636}
2637arch_initcall(stedma40_init);