/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan) (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
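
/*
 * Illustrative note (editor's, not from the hardware spec): each 2-bit
 * channel state field is shared by an even/odd channel pair. For example,
 * D40_CHAN_POS(6) == D40_CHAN_POS(7) == 6, so channels 6 and 7 both use
 * bits 6..7, but in different registers -- even-numbered channels live in
 * D40_DREG_ACTIVE, odd ones in D40_DREG_ACTIVO (see
 * d40_channel_execute_command() below).
 */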

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

#define D40_ALLOC_FREE (1 << 31)
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0

/*
 * The number of free d40_desc to keep in memory before starting
 * to kfree() them.
 */
#define D40_DESC_CACHE_SIZE 50

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of LLIs in lli_pool.
 * @lli_tcount: Number of LLIs processed in the transfer. When it equals
 * lli_len, this transfer job is done.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	u32 lli_len;
	u32 lli_tcount;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	enum dma_data_direction dir;
	bool is_in_client_list;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA.
 * @phy: Physical base address of LCLA.
 * @base_size: The size of the LCLA area.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equal to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void *base;
	dma_addr_t phy;
	resource_size_t base_size;
	spinlock_t lock;
	u32 *alloc_map;
	int num_blocks;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line numbers. allocated_src and allocated_dst can not both be
 * physically allocated on the same channel, since the interrupt handler
 * would then have no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical channel number, if any, of this channel.
 * @completed: Starts with 1, after the first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptors.
 * @queue: Queued jobs.
 * @free: List of free descriptors, ready to be reused.
 * @free_len: Number of descriptors in the free list.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	/* ID of the most recent completed transfer */
	int completed;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head active;
	struct list_head queue;
	struct list_head free;
	int free_len;
	struct stedma40_chan_cfg dma_cfg;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_lcla_elem lcla;
	struct d40_log_lli_full *lcpa;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_phy_chans;
	int num_log_chans;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *)base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}
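
/*
 * Usage sketch (illustrative, not part of the driver): a one-entry job
 * lands in the inline pre_alloc_lli buffer, while longer scatterlists
 * fall back to kmalloc():
 *
 *	if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0)
 *		return -ENOMEM;	-- only the kmalloc() path can fail
 */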

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}

static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
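
/*
 * Cookie note (editor's sketch): dma_cookie_t is a signed 32-bit value,
 * so the increment above eventually wraps negative and is reset to 1;
 * zero and negative values are never handed out as valid transaction ids.
 */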

static void d40_desc_reset(struct d40_desc *d40d)
{
	d40d->lli_tcount = 0;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc;
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				desc = d;
				goto out;
			}
	}

	if (list_empty(&d40c->free)) {
		/* Alloc new desc because we're out of used ones */
		desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
		if (desc == NULL)
			goto out;
		INIT_LIST_HEAD(&desc->node);
	} else {
		/* Reuse an old desc. */
		desc = list_first_entry(&d40c->free,
					struct d40_desc,
					node);
		list_del(&desc->node);
		d40c->free_len--;
	}
out:
	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40c->free_len < D40_DESC_CACHE_SIZE) {
		list_add_tail(&d40d->node, &d40c->free);
		d40c->free_len++;
	} else
		kfree(d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}
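
/*
 * Descriptor lifecycle overview (editor's summary): a descriptor taken
 * with d40_desc_get() is put on the queue list by tx_submit(), moved to
 * the active list when the hardware starts on it, and on completion is
 * either recycled onto the free list or, if the client has not acked it
 * yet, parked on the client list until async_tx_test_ack() succeeds.
 */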

/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c,
			   struct d40_lcla_pool *pool)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		pool->base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (pool->num_blocks > 32)
		return -EINVAL;

	spin_lock(&pool->lock);

	for (i = 0; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= pool->num_blocks)
		goto err;

	for (; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id >= pool->num_blocks || dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock(&pool->lock);
	return 0;
err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}
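
/*
 * LCLA allocation sketch (editor's note): each physical channel owns a
 * 1 KiB slice of LCLA and a 32-bit word in alloc_map; the two loops above
 * grab the first two clear bits as the src and dst block ids. With
 * llis_per_log == 8, src_id 0 and dst_id 1 would map to entries 1 and 9
 * of that channel's slice -- the +1 offset presumably accounts for the
 * first LLI of a transfer being written to LCPA rather than LCLA.
 */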

static void d40_lcla_id_put(struct d40_chan *d40c,
			    struct d40_lcla_pool *pool,
			    int id)
{
	if (id < 0)
		return;

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_lock(&pool->lock);
	pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
	spin_unlock(&pool->lock);
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend channel %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
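
/*
 * Timing note (editor's estimate): the suspend poll above does at most
 * D40_SUSPEND_MAX_IT iterations with a 3 us delay each, i.e. it gives
 * the hardware roughly 500 * 3 us = 1.5 ms to reach the SUSPENDED or
 * STOPPED state before reporting -EBUSY.
 */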

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.src_id);
	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.dst_id);

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val = 0;

	/* If SSLNK or SDLNK is zero all events are disabled */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SDLNK);
	return val;
}

static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}

static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}
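
/*
 * Worked example (editor's illustration): for physical channel 7,
 * addr_base = (7 % 2) * 4 = 4 and D40_CHAN_POS(7) = 6, so the 2-bit mode
 * field for channel 7 sits at bits 6..7 of PRMSE + 4; channel 6 uses the
 * same bit position in PRMSE + 0.
 */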

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
		d40d->lli_tcount = d40d->lli_len;
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		u32 lli_len;
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;

		src += d40d->lli_tcount;
		dst += d40d->lli_tcount;

		if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
			lli_len = d40d->lli_len;
		else
			lli_len = d40c->base->plat_data->llis_per_log;
		d40d->lli_tcount += lli_len;
		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
				  d40c->lcla.dst,
				  dst, src,
				  d40c->base->plat_data->llis_per_log);
	}
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}
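
/*
 * Note (editor's summary of the dmaengine contract): tx_submit() only
 * queues the job and hands back a cookie; nothing is written to the
 * hardware until the client calls dma_async_issue_pending(), which ends
 * up in d40_issue_pending() below.
 */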

static int d40_start(struct d40_chan *d40c)
{
	int err;

	if (d40c->log_num != D40_PHY_CHAN) {
		err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (err)
			return err;
		d40_config_set_event(d40c, true);
	}

	err = d40_channel_execute_command(d40c, D40_DMA_RUN);

	return err;
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_tcount < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning
	 * to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		d40_desc_reset(d40d_fin);
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

 err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
		{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
		{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
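
/*
 * Technique note (editor's): the ten status registers are read into
 * regs[] and then scanned as one flat bitmap with find_next_bit(), so a
 * single loop serves all logical and physical channels. On this 32-bit
 * platform BITS_PER_LONG is 32, hence bit number "chan" decomposes into
 * row = chan / 32 (which register) and idx = chan % 32 (which channel
 * bit within it).
 */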

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
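
/*
 * Allocation encoding (editor's summary): allocated_src/allocated_dst
 * hold either D40_ALLOC_FREE (bit 31), D40_ALLOC_PHY (bit 30) for an
 * exclusively owned physical channel, or a bitmap of event lines when
 * the channel is shared by logical clients (D40_ALLOC_LOG_FREE is 0, so
 * e.g. a value of 1 << 5 means only event line 5 is in use on that side).
 */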

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0, is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_chan(struct d40_chan *d40c,
			   struct stedma40_chan_cfg *info)
{
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * 32;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type * 32 + 16;
	}

	/* Write channel configuration to the DMA */
	return d40_config_write(d40c);
}
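
/*
 * LCPA layout example (editor's illustration): every device type owns a
 * 32-byte LCPA slot, the first 16 bytes for the src side and the last 16
 * for the dst side. Device type 3 would therefore get its src parameters
 * at lcpa_base + 96 and its dst parameters at lcpa_base + 112.
 */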

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event, dir;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
			__func__);
		return res;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		dir = D40_CHAN_REG_SDLNK;
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		dir = D40_CHAN_REG_SSLNK;
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/*
		 * Release logical channel, deactivate the event line during
		 * the time physical res is suspended.
		 */
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
		       D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       phy->num * D40_DREG_PCDELTA + dir);

		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else
		d40_alloc_mask_free(phy, is_src, 0);

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;
	int res;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res != 0)
		goto _exit;

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;

	/* Resume the other logical channels if any */
	if (d40_chan_has_events(d40c))
		res = d40_channel_execute_command(d40c,
						  D40_DMA_RUN);

_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (res)
			goto out;

		/* If bytes left to transfer or linked tx resume job */
		if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
			d40_config_set_event(d40c, true);
			res = d40_channel_execute_command(d40c, D40_DMA_RUN);
		}
	} else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);
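
/*
 * Client usage sketch (illustrative, assuming a channel already obtained
 * via dma_request_channel() and the STEDMA40_PSIZE_* constants from
 * <plat/ste_dma40.h>):
 *
 *	stedma40_set_psize(chan, STEDMA40_PSIZE_LOG_4, STEDMA40_PSIZE_LOG_4);
 */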

struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int lli_max = d40c->base->plat_data->llis_per_log;

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	memset(d40d, 0, sizeof(struct d40_desc));
	d40d->lli_len = sgl_len;

	d40d->txd.flags = flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (sgl_len > 1)
			/*
			 * Check if there is space available in lcla. If not,
			 * split list into 1-length and run only in lcpa
			 * space.
			 */
			if (d40_lcla_id_get(d40c,
					    &d40c->base->lcla_pool) != 0)
				lli_max = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 flags & DMA_PREP_INTERRUPT, lli_max,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 flags & DMA_PREP_INTERRUPT, lli_max,
					 d40c->base->plat_data->llis_per_log);
	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);

		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);

		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flg);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flg);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
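
/*
 * Typical client usage (a sketch, assuming a stedma40_chan_cfg filled in
 * by board/client code):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * Passing NULL instead of &cfg selects a default memcpy configuration.
 */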

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err)
			goto err_alloc;
	}

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto err_alloc;
	}

	err = d40_config_chan(d40c, &d40c->dma_cfg);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to configure channel\n",
			__func__);
		goto err_config;
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;

 err_config:
	(void) d40_free_dma(d40c);
 err_alloc:
	spin_unlock_irqrestore(&d40c->lock, flags);
	dev_err(&d40c->chan.dev->device,
		"[%s] Channel allocation failed\n", __func__);
	return -EINVAL;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int err = 0;

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	memset(d40d, 0, sizeof(struct d40_desc));

	d40d->txd.flags = flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 true, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);

	} else {

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);

		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flg);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flg);
	return NULL;
}

static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;
	int lli_max = d40c->base->plat_data->llis_per_log;

	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sg_len;
	d40d->lli_tcount = 0;

	if (sg_len > 1)
		/*
		 * Check if there is space available in lcla.
		 * If not, split list into 1-length and run only
		 * in lcpa space.
		 */
		if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
			lli_max = 1;

	if (direction == DMA_FROM_DEVICE) {
		dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
		total_size = d40_log_sg_to_dev(&d40c->lcla,
					       sgl, sg_len,
					       &d40d->lli_log,
					       &d40c->log_def,
					       d40c->dma_cfg.src_info.data_width,
					       d40c->dma_cfg.dst_info.data_width,
					       direction,
					       flags & DMA_PREP_INTERRUPT,
					       dev_addr, lli_max,
					       d40c->base->plat_data->llis_per_log);
	} else if (direction == DMA_TO_DEVICE) {
		dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		total_size = d40_log_sg_to_dev(&d40c->lcla,
					       sgl, sg_len,
					       &d40d->lli_log,
					       &d40c->log_def,
					       d40c->dma_cfg.src_info.data_width,
					       d40c->dma_cfg.dst_info.data_width,
					       direction,
					       flags & DMA_PREP_INTERRUPT,
					       dev_addr, lli_max,
					       d40c->base->plat_data->llis_per_log);
	} else
		return -EINVAL;
	if (total_size < 0)
		return -EINVAL;

	return 0;
}

static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sgl_len;
	d40d->lli_tcount = 0;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		src_dev_addr = 0;
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				d40d->lli_phy.src_addr,
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.src_info.psize,
				true);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				d40d->lli_phy.dst_addr,
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.dst_info.psize,
				true);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}
1981
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							  struct scatterlist *sgl,
							  unsigned int sg_len,
							  enum dma_data_direction direction,
							  unsigned long flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int err;

	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flg);

	if (d40d == NULL)
		return NULL;

	memset(d40d, 0, sizeof(struct d40_desc));

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		return NULL;
	}

	d40d->txd.flags = flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}
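
/*
 * Illustrative client-side sketch, not part of the driver: how a
 * peripheral driver of this kernel generation would typically drive the
 * hooks above through the generic dmaengine API. The channel is assumed
 * to have been acquired and configured already, and the scatterlist
 * DMA-mapped by the caller; error handling is trimmed for brevity.
 */
#if 0
static dma_cookie_t example_slave_tx(struct dma_chan *chan,
				     struct scatterlist *sgl,
				     unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Build the job; d40_prep_slave_sg() runs behind this hook. */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	/* Queue the descriptor; the cookie identifies the job. */
	cookie = desc->tx_submit(desc);

	/* Kick the channel; this ends up in d40_issue_pending(). */
	chan->device->device_issue_pending(chan);

	return cookie;
}
#endif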

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}
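
/*
 * Illustrative sketch, not part of the driver: a client polling a cookie
 * through the device_tx_status hook above via the generic helper of this
 * kernel generation, where DMA_SUCCESS is the completed status.
 */
#if 0
static bool example_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_SUCCESS;
}
#endif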

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
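
/*
 * Illustrative sketch, not part of the driver: pausing and resuming a
 * transfer through the device_control hook handled above. The
 * two-argument form matches d40_control() as defined in this file.
 */
#if 0
static int example_pause_resume(struct dma_chan *chan)
{
	int res;

	res = chan->device->device_control(chan, DMA_PAUSE);
	if (res)
		return res;

	/* ...reconfigure or inspect the peripheral here... */

	return chan->device->device_control(chan, DMA_RESUME);
}
#endif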

/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		/* Invalidate lcla element */
		d40c->lcla.src_id = -1;
		d40c->lcla.dst_id = -1;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->free);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		d40c->free_len = 0;

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register slave channels\n",
			__func__);
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses on even 32 bit
	 * boundaries, i.e. 2^2 byte alignment.
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
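
/*
 * Illustrative sketch, not part of the driver: once the devices above are
 * registered, a client can get hold of a memcpy-capable channel through
 * the generic allocator; no filter function is needed for plain memcpy.
 */
#if 0
static struct dma_chan *example_get_memcpy_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Picks a free channel from dma_memcpy or dma_both. */
	return dma_request_channel(mask, NULL, NULL);
}
#endif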

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security-only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}
	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings of extended vs standard channels */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
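
/*
 * Illustrative helper, not used by the driver: where a channel's 2-bit
 * mode field sits in the PRSME/PRSMO pair read above. Even channels live
 * in PRSME, odd ones in PRSMO, with two bits per channel pair position,
 * so e.g. channel 5 occupies bits 5:4 of PRSMO.
 */
#if 0
static u32 example_chan_mode(u32 prsme, u32 prsmo, int chan)
{
	u32 reg = (chan & 1) ? prsmo : prsme;

	return (reg >> (2 * (chan / 2))) & 0x3;
}
#endif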

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * ? has 0x0018,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},

		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	i = readl(virtbase + D40_DREG_PERIPHID2);

	if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, i & 0xf, D40_PERIPHID2_DESIGNER);
		goto failure;
	}

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 (i >> 4) & 0xf, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The maximum number of logical channels is the number of
		 * event lines for all src and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}
	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	return base;

failure:
	if (clk && !IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
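
/*
 * Worked example for the channel-count probe in d40_hw_detect_init()
 * above: the low three bits of ICFG encode the physical channel count as
 * 4 * field + 4, so field values 0..7 map to 4, 8, ..., 32 channels in
 * steps of four.
 */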

static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/*
	 * Configure all our dma channels to default settings. Channels are
	 * walked from the highest number down (note the
	 * base->num_phy_chans - i - 1 index), so after the loop the 2-bit
	 * fields of the lowest-numbered channels sit in the least
	 * significant bits of prmseo/activeo.
	 */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}

static int __init d40_probe(struct platform_device *pdev)
{
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}
	/* Get IO for logical channel link address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcla\" resource defined\n",
			__func__);
		goto failure;
	}

	base->lcla_pool.base_size = resource_size(res);
	base->lcla_pool.phy = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcla") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCLA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}
	val = readl(base->virtbase + D40_DREG_LCLA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	base->lcla_pool.base = ioremap(res->start, resource_size(res));
	if (!base->lcla_pool.base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->lcla_pool.num_blocks = base->num_phy_chans;

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] Failed to request IRQ\n", __func__);
		goto failure;
	}

	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->lcla_pool.base)
			iounmap(base->lcla_pool.base);
		if (base->lcpa_base)
			iounmap(base->lcpa_base);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (base->lcla_pool.phy)
			release_mem_region(base->lcla_pool.phy,
					   base->lcla_pool.base_size);
		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	/*
	 * platform_driver_probe() supplies the probe callback directly,
	 * which is why d40_driver above has no .probe member and why
	 * d40_probe() can live in the __init section.
	 */
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);