Linus Walleij8d318a52010-03-30 15:33:42 +02001/*
 2 * drivers/dma/ste_dma40.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
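/*
 * Each channel's state is a 2-bit field; an even/odd channel pair shares the
 * same bit position but lives in different registers (for example channels 6
 * and 7 both use bit position 6, in the ACTIVE and ACTIVO registers
 * respectively, see d40_channel_execute_command()).
 */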
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
Linus Walleij8d318a52010-03-30 15:33:42 +020037/* Hardware designer of the block */
38#define D40_PERIPHID2_DESIGNER 0x8
39
40/**
 41 * enum d40_command - The different commands and/or statuses.
42 *
 43 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 44 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
45 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
46 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
47 */
48enum d40_command {
49 D40_DMA_STOP = 0,
50 D40_DMA_RUN = 1,
51 D40_DMA_SUSPEND_REQ = 2,
52 D40_DMA_SUSPENDED = 3
53};
54
55/**
56 * struct d40_lli_pool - Structure for keeping LLIs in memory
57 *
 58 * @base: Pointer to a memory area used when pre_alloc_lli is not large
 59 * enough, i.e. for anything bigger than the most common case of 1 dst and
 60 * 1 src. NULL if pre_alloc_lli is used.
61 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 62 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 63 * one buffer to one buffer.
64 */
65struct d40_lli_pool {
66 void *base;
67 int size;
68 /* Space for dst and src, plus an extra for padding */
69 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
70};
71
72/**
73 * struct d40_desc - A descriptor is one DMA job.
74 *
 75 * @lli_phy: LLI settings for physical channel. Both src and dst
 76 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 77 * lli_len equals one.
78 * @lli_log: Same as above but for logical channels.
79 * @lli_pool: The pool with two entries pre-allocated.
Per Friden941b77a2010-06-20 21:24:45 +000080 * @lli_len: Number of llis of current descriptor.
 81 * @lli_count: Number of transferred llis.
 82 * @lli_tx_len: Max number of LLIs per transfer; there can be
 83 * many transfers for one descriptor.
Linus Walleij8d318a52010-03-30 15:33:42 +020084 * @txd: DMA engine struct. Used, among other things, for communication
85 * during a transfer.
86 * @node: List entry.
87 * @dir: The transfer direction of this job.
88 * @is_in_client_list: true if the client owns this descriptor.
89 *
90 * This descriptor is used for both logical and physical transfers.
91 */
92
93struct d40_desc {
94 /* LLI physical */
95 struct d40_phy_lli_bidir lli_phy;
96 /* LLI logical */
97 struct d40_log_lli_bidir lli_log;
98
99 struct d40_lli_pool lli_pool;
Per Friden941b77a2010-06-20 21:24:45 +0000100 int lli_len;
101 int lli_count;
102 u32 lli_tx_len;
Linus Walleij8d318a52010-03-30 15:33:42 +0200103
104 struct dma_async_tx_descriptor txd;
105 struct list_head node;
106
107 enum dma_data_direction dir;
108 bool is_in_client_list;
109};
110
111/**
112 * struct d40_lcla_pool - LCLA pool settings and data.
113 *
114 * @base: The virtual address of LCLA.
115 * @phy: Physical base address of LCLA.
116 * @base_size: size of lcla.
117 * @lock: Lock to protect the content in this struct.
118 * @alloc_map: Mapping between physical channel and LCLA entries.
 119 * @num_blocks: The number of entries in alloc_map. Equal to the
120 * number of physical channels.
121 */
122struct d40_lcla_pool {
123 void *base;
124 dma_addr_t phy;
125 resource_size_t base_size;
126 spinlock_t lock;
127 u32 *alloc_map;
128 int num_blocks;
129};
130
131/**
132 * struct d40_phy_res - struct for handling eventlines mapped to physical
133 * channels.
134 *
 135 * @lock: A lock protecting this entity.
 136 * @num: The physical channel number of this entity.
 137 * @allocated_src: Bit map showing which src event lines are mapped to
 138 * this physical channel. Can also be free or physically allocated.
 139 * @allocated_dst: Same as for src but for dst.
 140 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 141 * the event line number. allocated_src and allocated_dst cannot both be
 142 * allocated to a physical channel, since the interrupt handler then has
 143 * no way of figuring out which one the interrupt belongs to.
144 */
145struct d40_phy_res {
146 spinlock_t lock;
147 int num;
148 u32 allocated_src;
149 u32 allocated_dst;
150};
151
152struct d40_base;
153
154/**
155 * struct d40_chan - Struct that describes a channel.
156 *
157 * @lock: A spinlock to protect this struct.
 158 * @log_num: The logical channel number, if any, of this channel.
159 * @completed: Starts with 1, after first interrupt it is set to dma engine's
160 * current cookie.
161 * @pending_tx: The number of pending transfers. Used between interrupt handler
162 * and tasklet.
163 * @busy: Set to true when transfer is ongoing on this channel.
Jonas Aaberg2a614342010-06-20 21:25:24 +0000164 * @phy_chan: Pointer to the physical channel this instance runs on. If this
 165 * pointer is NULL, then the channel is not allocated.
Linus Walleij8d318a52010-03-30 15:33:42 +0200166 * @chan: DMA engine handle.
167 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
168 * transfer and call client callback.
 169 * @client: Client owned descriptor list.
170 * @active: Active descriptor.
171 * @queue: Queued jobs.
Linus Walleij8d318a52010-03-30 15:33:42 +0200172 * @dma_cfg: The client configuration of this dma channel.
173 * @base: Pointer to the device instance struct.
174 * @src_def_cfg: Default cfg register setting for src.
175 * @dst_def_cfg: Default cfg register setting for dst.
176 * @log_def: Default logical channel settings.
 177 * @lcla: Space for one dst/src pair for logical channel transfers.
178 * @lcpa: Pointer to dst and src lcpa settings.
179 *
180 * This struct can either "be" a logical or a physical channel.
181 */
182struct d40_chan {
183 spinlock_t lock;
184 int log_num;
185 /* ID of the most recent completed transfer */
186 int completed;
187 int pending_tx;
188 bool busy;
189 struct d40_phy_res *phy_chan;
190 struct dma_chan chan;
191 struct tasklet_struct tasklet;
192 struct list_head client;
193 struct list_head active;
194 struct list_head queue;
Linus Walleij8d318a52010-03-30 15:33:42 +0200195 struct stedma40_chan_cfg dma_cfg;
196 struct d40_base *base;
197 /* Default register configurations */
198 u32 src_def_cfg;
199 u32 dst_def_cfg;
200 struct d40_def_lcsp log_def;
201 struct d40_lcla_elem lcla;
202 struct d40_log_lli_full *lcpa;
203};
204
205/**
 206 * struct d40_base - The big global struct, one for each probed instance.
 207 *
 208 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
209 * @execmd_lock: Lock for execute command usage since several channels share
210 * the same physical register.
211 * @dev: The device structure.
212 * @virtbase: The virtual base address of the DMA's register.
213 * @clk: Pointer to the DMA clock structure.
214 * @phy_start: Physical memory start of the DMA registers.
215 * @phy_size: Size of the DMA register map.
216 * @irq: The IRQ number.
217 * @num_phy_chans: The number of physical channels. Read from HW. This
218 * is the number of available channels for this driver, not counting "Secure
219 * mode" allocated physical channels.
220 * @num_log_chans: The number of logical channels. Calculated from
221 * num_phy_chans.
222 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 223 * @dma_slave: dma_device channels that can only do slave transfers.
 224 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
225 * @phy_chans: Room for all possible physical channels in system.
226 * @log_chans: Room for all possible logical channels in system.
227 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
228 * to log_chans entries.
229 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
230 * to phy_chans entries.
231 * @plat_data: Pointer to provided platform_data which is the driver
232 * configuration.
233 * @phy_res: Vector containing all physical channels.
234 * @lcla_pool: lcla pool settings and data.
235 * @lcpa_base: The virtual mapped address of LCPA.
236 * @phy_lcpa: The physical address of the LCPA.
237 * @lcpa_size: The size of the LCPA area.
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000238 * @desc_slab: cache for descriptors.
Linus Walleij8d318a52010-03-30 15:33:42 +0200239 */
240struct d40_base {
241 spinlock_t interrupt_lock;
242 spinlock_t execmd_lock;
243 struct device *dev;
244 void __iomem *virtbase;
245 struct clk *clk;
246 phys_addr_t phy_start;
247 resource_size_t phy_size;
248 int irq;
249 int num_phy_chans;
250 int num_log_chans;
251 struct dma_device dma_both;
252 struct dma_device dma_slave;
253 struct dma_device dma_memcpy;
254 struct d40_chan *phy_chans;
255 struct d40_chan *log_chans;
256 struct d40_chan **lookup_log_chans;
257 struct d40_chan **lookup_phy_chans;
258 struct stedma40_platform_data *plat_data;
259 /* Physical half channels */
260 struct d40_phy_res *phy_res;
261 struct d40_lcla_pool lcla_pool;
262 void *lcpa_base;
263 dma_addr_t phy_lcpa;
264 resource_size_t lcpa_size;
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000265 struct kmem_cache *desc_slab;
Linus Walleij8d318a52010-03-30 15:33:42 +0200266};
267
268/**
269 * struct d40_interrupt_lookup - lookup table for interrupt handler
270 *
271 * @src: Interrupt mask register.
272 * @clr: Interrupt clear register.
273 * @is_error: true if this is an error interrupt.
 274 * @offset: start delta into lookup_log_chans in d40_base. If equal to
 275 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
276 */
277struct d40_interrupt_lookup {
278 u32 src;
279 u32 clr;
280 bool is_error;
281 int offset;
282};
283
284/**
285 * struct d40_reg_val - simple lookup struct
286 *
287 * @reg: The register.
288 * @val: The value that belongs to the register in reg.
289 */
290struct d40_reg_val {
291 unsigned int reg;
292 unsigned int val;
293};
294
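/*
 * Allocate room for the src and dst LLI lists of one descriptor. The common
 * single-LLI case uses the pre_alloc_lli area embedded in the descriptor;
 * longer transfers get a kmalloc'd, alignment-padded buffer (GFP_NOWAIT, so
 * the allocation can be done from atomic context).
 */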
295static int d40_pool_lli_alloc(struct d40_desc *d40d,
296 int lli_len, bool is_log)
297{
298 u32 align;
299 void *base;
300
301 if (is_log)
302 align = sizeof(struct d40_log_lli);
303 else
304 align = sizeof(struct d40_phy_lli);
305
306 if (lli_len == 1) {
307 base = d40d->lli_pool.pre_alloc_lli;
308 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
309 d40d->lli_pool.base = NULL;
310 } else {
311 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
312
313 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
314 d40d->lli_pool.base = base;
315
316 if (d40d->lli_pool.base == NULL)
317 return -ENOMEM;
318 }
319
320 if (is_log) {
321 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
322 align);
323 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
324 align);
325 } else {
326 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
327 align);
328 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
329 align);
330
331 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
332 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
333 }
334
335 return 0;
336}
337
338static void d40_pool_lli_free(struct d40_desc *d40d)
339{
340 kfree(d40d->lli_pool.base);
341 d40d->lli_pool.base = NULL;
342 d40d->lli_pool.size = 0;
343 d40d->lli_log.src = NULL;
344 d40d->lli_log.dst = NULL;
345 d40d->lli_phy.src = NULL;
346 d40d->lli_phy.dst = NULL;
347 d40d->lli_phy.src_addr = 0;
348 d40d->lli_phy.dst_addr = 0;
349}
350
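/* Hand out the next dmaengine cookie, wrapping back to 1 if it goes negative. */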
351static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
352 struct d40_desc *desc)
353{
354 dma_cookie_t cookie = d40c->chan.cookie;
355
356 if (++cookie < 0)
357 cookie = 1;
358
359 d40c->chan.cookie = cookie;
360 desc->txd.cookie = cookie;
361
362 return cookie;
363}
364
Linus Walleij8d318a52010-03-30 15:33:42 +0200365static void d40_desc_remove(struct d40_desc *d40d)
366{
367 list_del(&d40d->node);
368}
369
370static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
371{
Linus Walleij8d318a52010-03-30 15:33:42 +0200372 struct d40_desc *d;
373 struct d40_desc *_d;
374
375 if (!list_empty(&d40c->client)) {
376 list_for_each_entry_safe(d, _d, &d40c->client, node)
377 if (async_tx_test_ack(&d->txd)) {
378 d40_pool_lli_free(d);
379 d40_desc_remove(d);
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000380 break;
Linus Walleij8d318a52010-03-30 15:33:42 +0200381 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200382 } else {
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000383 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
384 if (d != NULL) {
385 memset(d, 0, sizeof(struct d40_desc));
386 INIT_LIST_HEAD(&d->node);
387 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200388 }
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000389 return d;
Linus Walleij8d318a52010-03-30 15:33:42 +0200390}
391
392static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
393{
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000394 kmem_cache_free(d40c->base->desc_slab, d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +0200395}
396
397static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
398{
399 list_add_tail(&desc->node, &d40c->active);
400}
401
402static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
403{
404 struct d40_desc *d;
405
406 if (list_empty(&d40c->active))
407 return NULL;
408
409 d = list_first_entry(&d40c->active,
410 struct d40_desc,
411 node);
412 return d;
413}
414
415static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
416{
417 list_add_tail(&desc->node, &d40c->queue);
418}
419
420static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
421{
422 struct d40_desc *d;
423
424 if (list_empty(&d40c->queue))
425 return NULL;
426
427 d = list_first_entry(&d40c->queue,
428 struct d40_desc,
429 node);
430 return d;
431}
432
433/* Support functions for logical channels */
434
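/*
 * Reserve one src and one dst LCLA entry for this channel. Allocations are
 * tracked as bits in alloc_map (one word per physical channel) and the
 * resulting src/dst pointers index into this physical channel's slice of the
 * LCLA area.
 */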
435static int d40_lcla_id_get(struct d40_chan *d40c,
436 struct d40_lcla_pool *pool)
437{
438 int src_id = 0;
439 int dst_id = 0;
440 struct d40_log_lli *lcla_lidx_base =
441 pool->base + d40c->phy_chan->num * 1024;
442 int i;
443 int lli_per_log = d40c->base->plat_data->llis_per_log;
444
445 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
446 return 0;
447
448 if (pool->num_blocks > 32)
449 return -EINVAL;
450
451 spin_lock(&pool->lock);
452
453 for (i = 0; i < pool->num_blocks; i++) {
454 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
455 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
456 break;
457 }
458 }
459 src_id = i;
460 if (src_id >= pool->num_blocks)
461 goto err;
462
463 for (; i < pool->num_blocks; i++) {
464 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
465 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
466 break;
467 }
468 }
469
470 dst_id = i;
471 if (dst_id == src_id)
472 goto err;
473
474 d40c->lcla.src_id = src_id;
475 d40c->lcla.dst_id = dst_id;
476 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
477 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
478
479
480 spin_unlock(&pool->lock);
481 return 0;
482err:
483 spin_unlock(&pool->lock);
484 return -EINVAL;
485}
486
487static void d40_lcla_id_put(struct d40_chan *d40c,
488 struct d40_lcla_pool *pool,
489 int id)
490{
491 if (id < 0)
492 return;
493
494 d40c->lcla.src_id = -1;
495 d40c->lcla.dst_id = -1;
496
497 spin_lock(&pool->lock);
498 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
499 spin_unlock(&pool->lock);
500}
501
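/*
 * Write a command for this physical channel into the shared ACTIVE/ACTIVO
 * register. For D40_DMA_SUSPEND_REQ the channel is polled, at most
 * D40_SUSPEND_MAX_IT times, until it reports SUSPENDED or STOP. execmd_lock
 * serialises access since several channels share the same register.
 */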
502static int d40_channel_execute_command(struct d40_chan *d40c,
503 enum d40_command command)
504{
505 int status, i;
506 void __iomem *active_reg;
507 int ret = 0;
508 unsigned long flags;
509
510 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
511
512 if (d40c->phy_chan->num % 2 == 0)
513 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
514 else
515 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
516
517 if (command == D40_DMA_SUSPEND_REQ) {
518 status = (readl(active_reg) &
519 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
520 D40_CHAN_POS(d40c->phy_chan->num);
521
522 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
523 goto done;
524 }
525
526 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
527
528 if (command == D40_DMA_SUSPEND_REQ) {
529
530 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
531 status = (readl(active_reg) &
532 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
533 D40_CHAN_POS(d40c->phy_chan->num);
534
535 cpu_relax();
536 /*
537 * Reduce the number of bus accesses while
538 * waiting for the DMA to suspend.
539 */
540 udelay(3);
541
542 if (status == D40_DMA_STOP ||
543 status == D40_DMA_SUSPENDED)
544 break;
545 }
546
547 if (i == D40_SUSPEND_MAX_IT) {
548 dev_err(&d40c->chan.dev->device,
549 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
550 __func__, d40c->phy_chan->num, d40c->log_num,
551 status);
552 dump_stack();
553 ret = -EBUSY;
554 }
555
556 }
557done:
558 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
559 return ret;
560}
561
562static void d40_term_all(struct d40_chan *d40c)
563{
564 struct d40_desc *d40d;
Linus Walleij8d318a52010-03-30 15:33:42 +0200565
566 /* Release active descriptors */
567 while ((d40d = d40_first_active_get(d40c))) {
568 d40_desc_remove(d40d);
569
570 /* Return desc to free-list */
571 d40_desc_free(d40c, d40d);
572 }
573
574 /* Release queued descriptors waiting for transfer */
575 while ((d40d = d40_first_queued(d40c))) {
576 d40_desc_remove(d40d);
577
578 /* Return desc to free-list */
579 d40_desc_free(d40c, d40d);
580 }
581
Linus Walleij8d318a52010-03-30 15:33:42 +0200582 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
583 d40c->lcla.src_id);
584 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
585 d40c->lcla.dst_id);
586
587 d40c->pending_tx = 0;
588 d40c->busy = false;
589}
590
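/*
 * Activate or deactivate the event line(s) of a logical channel: the source
 * event line (SSLNK) for transfers from a peripheral, the destination event
 * line (SDLNK) for all other directions.
 */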
591static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
592{
593 u32 val;
594 unsigned long flags;
595
596 if (do_enable)
597 val = D40_ACTIVATE_EVENTLINE;
598 else
599 val = D40_DEACTIVATE_EVENTLINE;
600
601 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
602
603 /* Enable event line connected to device (or memcpy) */
604 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
605 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
606 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
607
608 writel((val << D40_EVENTLINE_POS(event)) |
609 ~D40_EVENTLINE_MASK(event),
610 d40c->base->virtbase + D40_DREG_PCBASE +
611 d40c->phy_chan->num * D40_DREG_PCDELTA +
612 D40_CHAN_REG_SSLNK);
613 }
614 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
615 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
616
617 writel((val << D40_EVENTLINE_POS(event)) |
618 ~D40_EVENTLINE_MASK(event),
619 d40c->base->virtbase + D40_DREG_PCBASE +
620 d40c->phy_chan->num * D40_DREG_PCDELTA +
621 D40_CHAN_REG_SDLNK);
622 }
623
624 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
625}
626
Jonas Aaberga5ebca42010-05-18 00:41:09 +0200627static u32 d40_chan_has_events(struct d40_chan *d40c)
Linus Walleij8d318a52010-03-30 15:33:42 +0200628{
629 u32 val = 0;
630
631 /* If SSLNK or SDLNK is zero all events are disabled */
632 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
633 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
634 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
635 d40c->phy_chan->num * D40_DREG_PCDELTA +
636 D40_CHAN_REG_SSLNK);
637
638 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
639 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
640 d40c->phy_chan->num * D40_DREG_PCDELTA +
641 D40_CHAN_REG_SDLNK);
Jonas Aaberga5ebca42010-05-18 00:41:09 +0200642 return val;
Linus Walleij8d318a52010-03-30 15:33:42 +0200643}
644
645static void d40_config_enable_lidx(struct d40_chan *d40c)
646{
647 /* Set LIDX for lcla */
648 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
649 D40_SREG_ELEM_LOG_LIDX_MASK,
650 d40c->base->virtbase + D40_DREG_PCBASE +
651 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
652
653 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
654 D40_SREG_ELEM_LOG_LIDX_MASK,
655 d40c->base->virtbase + D40_DREG_PCBASE +
656 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
657}
658
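/*
 * Write the channel's static configuration to hardware while it is suspended:
 * channel mode (logical/physical) and mode option in PRMSE/PRMOE and, for
 * logical channels, the default src/dst CFG registers plus the LIDX fields
 * pointing at this physical channel's LCLA entries.
 */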
659static int d40_config_write(struct d40_chan *d40c)
660{
661 u32 addr_base;
662 u32 var;
663 int res;
664
665 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
666 if (res)
667 return res;
668
669 /* Odd addresses are even addresses + 4 */
670 addr_base = (d40c->phy_chan->num % 2) * 4;
671 /* Setup channel mode to logical or physical */
672 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
673 D40_CHAN_POS(d40c->phy_chan->num);
674 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
675
676 /* Setup operational mode option register */
677 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
678 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
679
680 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
681
682 if (d40c->log_num != D40_PHY_CHAN) {
683 /* Set default config for CFG reg */
684 writel(d40c->src_def_cfg,
685 d40c->base->virtbase + D40_DREG_PCBASE +
686 d40c->phy_chan->num * D40_DREG_PCDELTA +
687 D40_CHAN_REG_SSCFG);
688 writel(d40c->dst_def_cfg,
689 d40c->base->virtbase + D40_DREG_PCBASE +
690 d40c->phy_chan->num * D40_DREG_PCDELTA +
691 D40_CHAN_REG_SDCFG);
692
693 d40_config_enable_lidx(d40c);
694 }
695 return res;
696}
697
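/*
 * Feed the next chunk of a descriptor to the hardware. Physical channels get
 * their complete src/dst LLI lists written to the channel registers; logical
 * channels get the next lli_tx_len LLIs, starting at lli_count, copied into
 * LCPA/LCLA.
 */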
698static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
699{
700
701 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
702 d40_phy_lli_write(d40c->base->virtbase,
703 d40c->phy_chan->num,
704 d40d->lli_phy.dst,
705 d40d->lli_phy.src);
Linus Walleij8d318a52010-03-30 15:33:42 +0200706 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
Linus Walleij8d318a52010-03-30 15:33:42 +0200707 struct d40_log_lli *src = d40d->lli_log.src;
708 struct d40_log_lli *dst = d40d->lli_log.dst;
709
Per Friden941b77a2010-06-20 21:24:45 +0000710 src += d40d->lli_count;
711 dst += d40d->lli_count;
Linus Walleij8d318a52010-03-30 15:33:42 +0200712 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
713 d40c->lcla.dst,
714 dst, src,
715 d40c->base->plat_data->llis_per_log);
716 }
Per Friden941b77a2010-06-20 21:24:45 +0000717 d40d->lli_count += d40d->lli_tx_len;
Linus Walleij8d318a52010-03-30 15:33:42 +0200718}
719
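/* dmaengine tx_submit hook: assign a cookie and queue the descriptor. */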
720static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
721{
722 struct d40_chan *d40c = container_of(tx->chan,
723 struct d40_chan,
724 chan);
725 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
726 unsigned long flags;
727
728 spin_lock_irqsave(&d40c->lock, flags);
729
730 tx->cookie = d40_assign_cookie(d40c, d40d);
731
732 d40_desc_queue(d40c, d40d);
733
734 spin_unlock_irqrestore(&d40c->lock, flags);
735
736 return tx->cookie;
737}
738
739static int d40_start(struct d40_chan *d40c)
740{
741 int err;
742
743 if (d40c->log_num != D40_PHY_CHAN) {
744 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
745 if (err)
746 return err;
747 d40_config_set_event(d40c, true);
748 }
749
750 err = d40_channel_execute_command(d40c, D40_DMA_RUN);
751
752 return err;
753}
754
755static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
756{
757 struct d40_desc *d40d;
758 int err;
759
760 /* Start queued jobs, if any */
761 d40d = d40_first_queued(d40c);
762
763 if (d40d != NULL) {
764 d40c->busy = true;
765
766 /* Remove from queue */
767 d40_desc_remove(d40d);
768
769 /* Add to active queue */
770 d40_desc_submit(d40c, d40d);
771
772 /* Initiate DMA job */
773 d40_desc_load(d40c, d40d);
774
775 /* Start dma job */
776 err = d40_start(d40c);
777
778 if (err)
779 return NULL;
780 }
781
782 return d40d;
783}
784
785/* called from interrupt context */
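/*
 * Transfer-complete handling: if the active descriptor still has LLIs left,
 * load and start the next chunk; otherwise start the next queued job (if any)
 * and let the tasklet report completion to the client.
 */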
786static void dma_tc_handle(struct d40_chan *d40c)
787{
788 struct d40_desc *d40d;
789
790 if (!d40c->phy_chan)
791 return;
792
793 /* Get first active entry from list */
794 d40d = d40_first_active_get(d40c);
795
796 if (d40d == NULL)
797 return;
798
Per Friden941b77a2010-06-20 21:24:45 +0000799 if (d40d->lli_count < d40d->lli_len) {
Linus Walleij8d318a52010-03-30 15:33:42 +0200800
801 d40_desc_load(d40c, d40d);
802 /* Start dma job */
803 (void) d40_start(d40c);
804 return;
805 }
806
807 if (d40_queue_start(d40c) == NULL)
808 d40c->busy = false;
809
810 d40c->pending_tx++;
811 tasklet_schedule(&d40c->tasklet);
812
813}
814
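/*
 * Completion tasklet: record the cookie of the finished descriptor, free it
 * if the client has already acked it (otherwise park it on the client list)
 * and invoke the client callback outside the channel lock.
 */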
815static void dma_tasklet(unsigned long data)
816{
817 struct d40_chan *d40c = (struct d40_chan *) data;
818 struct d40_desc *d40d_fin;
819 unsigned long flags;
820 dma_async_tx_callback callback;
821 void *callback_param;
822
823 spin_lock_irqsave(&d40c->lock, flags);
824
825 /* Get first active entry from list */
826 d40d_fin = d40_first_active_get(d40c);
827
828 if (d40d_fin == NULL)
829 goto err;
830
831 d40c->completed = d40d_fin->txd.cookie;
832
833 /*
834 * If terminating a channel pending_tx is set to zero.
 835 * This prevents any finished active jobs from returning to the client.
836 */
837 if (d40c->pending_tx == 0) {
838 spin_unlock_irqrestore(&d40c->lock, flags);
839 return;
840 }
841
842 /* Callback to client */
843 callback = d40d_fin->txd.callback;
844 callback_param = d40d_fin->txd.callback_param;
845
846 if (async_tx_test_ack(&d40d_fin->txd)) {
847 d40_pool_lli_free(d40d_fin);
848 d40_desc_remove(d40d_fin);
849 /* Return desc to free-list */
850 d40_desc_free(d40c, d40d_fin);
851 } else {
Linus Walleij8d318a52010-03-30 15:33:42 +0200852 if (!d40d_fin->is_in_client_list) {
853 d40_desc_remove(d40d_fin);
854 list_add_tail(&d40d_fin->node, &d40c->client);
855 d40d_fin->is_in_client_list = true;
856 }
857 }
858
859 d40c->pending_tx--;
860
861 if (d40c->pending_tx)
862 tasklet_schedule(&d40c->tasklet);
863
864 spin_unlock_irqrestore(&d40c->lock, flags);
865
866 if (callback)
867 callback(callback_param);
868
869 return;
870
871 err:
 872 /* Rescue maneuver if receiving double interrupts */
873 if (d40c->pending_tx > 0)
874 d40c->pending_tx--;
875 spin_unlock_irqrestore(&d40c->lock, flags);
876}
877
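/*
 * Shared interrupt handler. The il[] table maps each interrupt status
 * register (logical/physical, transfer-complete/error) to its clear register
 * and to an offset into the channel lookup arrays; every set bit is acked and
 * dispatched to the owning channel.
 */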
878static irqreturn_t d40_handle_interrupt(int irq, void *data)
879{
880 static const struct d40_interrupt_lookup il[] = {
881 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
882 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
883 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
884 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
885 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
886 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
887 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
888 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
889 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
890 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
891 };
892
893 int i;
894 u32 regs[ARRAY_SIZE(il)];
895 u32 tmp;
896 u32 idx;
897 u32 row;
898 long chan = -1;
899 struct d40_chan *d40c;
900 unsigned long flags;
901 struct d40_base *base = data;
902
903 spin_lock_irqsave(&base->interrupt_lock, flags);
904
905 /* Read interrupt status of both logical and physical channels */
906 for (i = 0; i < ARRAY_SIZE(il); i++)
907 regs[i] = readl(base->virtbase + il[i].src);
908
909 for (;;) {
910
911 chan = find_next_bit((unsigned long *)regs,
912 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
913
914 /* No more set bits found? */
915 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
916 break;
917
918 row = chan / BITS_PER_LONG;
919 idx = chan & (BITS_PER_LONG - 1);
920
921 /* ACK interrupt */
922 tmp = readl(base->virtbase + il[row].clr);
923 tmp |= 1 << idx;
924 writel(tmp, base->virtbase + il[row].clr);
925
926 if (il[row].offset == D40_PHY_CHAN)
927 d40c = base->lookup_phy_chans[idx];
928 else
929 d40c = base->lookup_log_chans[il[row].offset + idx];
930 spin_lock(&d40c->lock);
931
932 if (!il[row].is_error)
933 dma_tc_handle(d40c);
934 else
935 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
936 __func__, chan, il[row].offset, idx);
937
938 spin_unlock(&d40c->lock);
939 }
940
941 spin_unlock_irqrestore(&base->interrupt_lock, flags);
942
943 return IRQ_HANDLED;
944}
945
946
947static int d40_validate_conf(struct d40_chan *d40c,
948 struct stedma40_chan_cfg *conf)
949{
950 int res = 0;
951 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
952 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
953 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
954 == STEDMA40_CHANNEL_IN_LOG_MODE;
955
956 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
957 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
958 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
959 __func__);
960 res = -EINVAL;
961 }
962
963 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
964 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
965 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
966 __func__);
967 res = -EINVAL;
968 }
969
970 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
971 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
972 dev_err(&d40c->chan.dev->device,
973 "[%s] No event line\n", __func__);
974 res = -EINVAL;
975 }
976
977 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
978 (src_event_group != dst_event_group)) {
979 dev_err(&d40c->chan.dev->device,
980 "[%s] Invalid event group\n", __func__);
981 res = -EINVAL;
982 }
983
984 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
985 /*
986 * DMAC HW supports it. Will be added to this driver,
987 * in case any dma client requires it.
988 */
989 dev_err(&d40c->chan.dev->device,
990 "[%s] periph to periph not supported\n",
991 __func__);
992 res = -EINVAL;
993 }
994
995 return res;
996}
997
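/*
 * Try to reserve a physical channel, either exclusively (is_log == false,
 * which requires both src and dst to be completely free) or for a single
 * logical event line on the src or dst side. Returns true on success.
 */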
998static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +0200999 int log_event_line, bool is_log)
Linus Walleij8d318a52010-03-30 15:33:42 +02001000{
1001 unsigned long flags;
1002 spin_lock_irqsave(&phy->lock, flags);
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001003 if (!is_log) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001004 /* Physical interrupts are masked per physical full channel */
1005 if (phy->allocated_src == D40_ALLOC_FREE &&
1006 phy->allocated_dst == D40_ALLOC_FREE) {
1007 phy->allocated_dst = D40_ALLOC_PHY;
1008 phy->allocated_src = D40_ALLOC_PHY;
1009 goto found;
1010 } else
1011 goto not_found;
1012 }
1013
1014 /* Logical channel */
1015 if (is_src) {
1016 if (phy->allocated_src == D40_ALLOC_PHY)
1017 goto not_found;
1018
1019 if (phy->allocated_src == D40_ALLOC_FREE)
1020 phy->allocated_src = D40_ALLOC_LOG_FREE;
1021
1022 if (!(phy->allocated_src & (1 << log_event_line))) {
1023 phy->allocated_src |= 1 << log_event_line;
1024 goto found;
1025 } else
1026 goto not_found;
1027 } else {
1028 if (phy->allocated_dst == D40_ALLOC_PHY)
1029 goto not_found;
1030
1031 if (phy->allocated_dst == D40_ALLOC_FREE)
1032 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1033
1034 if (!(phy->allocated_dst & (1 << log_event_line))) {
1035 phy->allocated_dst |= 1 << log_event_line;
1036 goto found;
1037 } else
1038 goto not_found;
1039 }
1040
1041not_found:
1042 spin_unlock_irqrestore(&phy->lock, flags);
1043 return false;
1044found:
1045 spin_unlock_irqrestore(&phy->lock, flags);
1046 return true;
1047}
1048
1049static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1050 int log_event_line)
1051{
1052 unsigned long flags;
1053 bool is_free = false;
1054
1055 spin_lock_irqsave(&phy->lock, flags);
1056 if (!log_event_line) {
1057 /* Physical interrupts are masked per physical full channel */
1058 phy->allocated_dst = D40_ALLOC_FREE;
1059 phy->allocated_src = D40_ALLOC_FREE;
1060 is_free = true;
1061 goto out;
1062 }
1063
1064 /* Logical channel */
1065 if (is_src) {
1066 phy->allocated_src &= ~(1 << log_event_line);
1067 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1068 phy->allocated_src = D40_ALLOC_FREE;
1069 } else {
1070 phy->allocated_dst &= ~(1 << log_event_line);
1071 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1072 phy->allocated_dst = D40_ALLOC_FREE;
1073 }
1074
1075 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1076 D40_ALLOC_FREE);
1077
1078out:
1079 spin_unlock_irqrestore(&phy->lock, flags);
1080
1081 return is_free;
1082}
1083
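/*
 * Pick a physical channel for this client. Physical channels are claimed
 * exclusively; logical channels are placed on one of the physical channels
 * serving the device's event group, with src and dst allocations searched
 * from opposite ends of each candidate pair.
 */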
1084static int d40_allocate_channel(struct d40_chan *d40c)
1085{
1086 int dev_type;
1087 int event_group;
1088 int event_line;
1089 struct d40_phy_res *phys;
1090 int i;
1091 int j;
1092 int log_num;
1093 bool is_src;
1094 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1095 == STEDMA40_CHANNEL_IN_LOG_MODE;
1096
1097
1098 phys = d40c->base->phy_res;
1099
1100 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1101 dev_type = d40c->dma_cfg.src_dev_type;
1102 log_num = 2 * dev_type;
1103 is_src = true;
1104 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1105 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1106 /* dst event lines are used for logical memcpy */
1107 dev_type = d40c->dma_cfg.dst_dev_type;
1108 log_num = 2 * dev_type + 1;
1109 is_src = false;
1110 } else
1111 return -EINVAL;
1112
1113 event_group = D40_TYPE_TO_GROUP(dev_type);
1114 event_line = D40_TYPE_TO_EVENT(dev_type);
1115
1116 if (!is_log) {
1117 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1118 /* Find physical half channel */
1119 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1120
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001121 if (d40_alloc_mask_set(&phys[i], is_src,
1122 0, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001123 goto found_phy;
1124 }
1125 } else
1126 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1127 int phy_num = j + event_group * 2;
1128 for (i = phy_num; i < phy_num + 2; i++) {
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001129 if (d40_alloc_mask_set(&phys[i], is_src,
1130 0, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001131 goto found_phy;
1132 }
1133 }
1134 return -EINVAL;
1135found_phy:
1136 d40c->phy_chan = &phys[i];
1137 d40c->log_num = D40_PHY_CHAN;
1138 goto out;
1139 }
1140 if (dev_type == -1)
1141 return -EINVAL;
1142
1143 /* Find logical channel */
1144 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1145 int phy_num = j + event_group * 2;
1146 /*
 1147 * Spread logical channels across all available physical channels
 1148 * rather than packing every logical channel onto the first
 1149 * available phy channel.
1150 */
1151 if (is_src) {
1152 for (i = phy_num; i < phy_num + 2; i++) {
1153 if (d40_alloc_mask_set(&phys[i], is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001154 event_line, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001155 goto found_log;
1156 }
1157 } else {
1158 for (i = phy_num + 1; i >= phy_num; i--) {
1159 if (d40_alloc_mask_set(&phys[i], is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001160 event_line, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001161 goto found_log;
1162 }
1163 }
1164 }
1165 return -EINVAL;
1166
1167found_log:
1168 d40c->phy_chan = &phys[i];
1169 d40c->log_num = log_num;
1170out:
1171
1172 if (is_log)
1173 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1174 else
1175 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1176
1177 return 0;
1178
1179}
1180
Linus Walleij8d318a52010-03-30 15:33:42 +02001181static int d40_config_memcpy(struct d40_chan *d40c)
1182{
1183 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1184
1185 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1186 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1187 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1188 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1189 memcpy[d40c->chan.chan_id];
1190
1191 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1192 dma_has_cap(DMA_SLAVE, cap)) {
1193 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1194 } else {
1195 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1196 __func__);
1197 return -EINVAL;
1198 }
1199
1200 return 0;
1201}
1202
1203
1204static int d40_free_dma(struct d40_chan *d40c)
1205{
1206
1207 int res = 0;
1208 u32 event, dir;
1209 struct d40_phy_res *phy = d40c->phy_chan;
1210 bool is_src;
Per Fridena8be8622010-06-20 21:24:59 +00001211 struct d40_desc *d;
1212 struct d40_desc *_d;
1213
Linus Walleij8d318a52010-03-30 15:33:42 +02001214
1215 /* Terminate all queued and active transfers */
1216 d40_term_all(d40c);
1217
Per Fridena8be8622010-06-20 21:24:59 +00001218 /* Release client owned descriptors */
1219 if (!list_empty(&d40c->client))
1220 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1221 d40_pool_lli_free(d);
1222 d40_desc_remove(d);
1223 /* Return desc to free-list */
1224 d40_desc_free(d40c, d);
1225 }
1226
Linus Walleij8d318a52010-03-30 15:33:42 +02001227 if (phy == NULL) {
1228 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1229 __func__);
1230 return -EINVAL;
1231 }
1232
1233 if (phy->allocated_src == D40_ALLOC_FREE &&
1234 phy->allocated_dst == D40_ALLOC_FREE) {
1235 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1236 __func__);
1237 return -EINVAL;
1238 }
1239
Linus Walleij8d318a52010-03-30 15:33:42 +02001240 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1241 if (res) {
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001242 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
Linus Walleij8d318a52010-03-30 15:33:42 +02001243 __func__);
1244 return res;
1245 }
1246
1247 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1248 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1249 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1250 dir = D40_CHAN_REG_SDLNK;
1251 is_src = false;
1252 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1253 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1254 dir = D40_CHAN_REG_SSLNK;
1255 is_src = true;
1256 } else {
1257 dev_err(&d40c->chan.dev->device,
1258 "[%s] Unknown direction\n", __func__);
1259 return -EINVAL;
1260 }
1261
1262 if (d40c->log_num != D40_PHY_CHAN) {
1263 /*
 1264 * Release the logical channel; deactivate the event line while
 1265 * the physical resource is suspended.
1266 */
1267 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1268 D40_EVENTLINE_MASK(event),
1269 d40c->base->virtbase + D40_DREG_PCBASE +
1270 phy->num * D40_DREG_PCDELTA + dir);
1271
1272 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1273
1274 /*
 1275 * Check if there are more logical allocations
1276 * on this phy channel.
1277 */
1278 if (!d40_alloc_mask_free(phy, is_src, event)) {
1279 /* Resume the other logical channels if any */
1280 if (d40_chan_has_events(d40c)) {
1281 res = d40_channel_execute_command(d40c,
1282 D40_DMA_RUN);
1283 if (res) {
1284 dev_err(&d40c->chan.dev->device,
1285 "[%s] Executing RUN command\n",
1286 __func__);
1287 return res;
1288 }
1289 }
1290 return 0;
1291 }
1292 } else
1293 d40_alloc_mask_free(phy, is_src, 0);
1294
1295 /* Release physical channel */
1296 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1297 if (res) {
1298 dev_err(&d40c->chan.dev->device,
1299 "[%s] Failed to stop channel\n", __func__);
1300 return res;
1301 }
1302 d40c->phy_chan = NULL;
1303 /* Invalidate channel type */
1304 d40c->dma_cfg.channel_type = 0;
1305 d40c->base->lookup_phy_chans[phy->num] = NULL;
1306
1307 return 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02001308}
1309
1310static int d40_pause(struct dma_chan *chan)
1311{
1312 struct d40_chan *d40c =
1313 container_of(chan, struct d40_chan, chan);
1314 int res;
Linus Walleij8d318a52010-03-30 15:33:42 +02001315 unsigned long flags;
1316
1317 spin_lock_irqsave(&d40c->lock, flags);
1318
1319 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1320 if (res == 0) {
1321 if (d40c->log_num != D40_PHY_CHAN) {
1322 d40_config_set_event(d40c, false);
1323 /* Resume the other logical channels if any */
1324 if (d40_chan_has_events(d40c))
1325 res = d40_channel_execute_command(d40c,
1326 D40_DMA_RUN);
1327 }
1328 }
1329
1330 spin_unlock_irqrestore(&d40c->lock, flags);
1331 return res;
1332}
1333
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001334static bool d40_is_paused(struct d40_chan *d40c)
1335{
1336 bool is_paused = false;
1337 unsigned long flags;
1338 void __iomem *active_reg;
1339 u32 status;
1340 u32 event;
1341 int res;
1342
1343 spin_lock_irqsave(&d40c->lock, flags);
1344
1345 if (d40c->log_num == D40_PHY_CHAN) {
1346 if (d40c->phy_chan->num % 2 == 0)
1347 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1348 else
1349 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1350
1351 status = (readl(active_reg) &
1352 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1353 D40_CHAN_POS(d40c->phy_chan->num);
1354 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1355 is_paused = true;
1356
1357 goto _exit;
1358 }
1359
1360 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1361 if (res != 0)
1362 goto _exit;
1363
1364 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1365 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1366 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1367 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1368 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1369 else {
1370 dev_err(&d40c->chan.dev->device,
1371 "[%s] Unknown direction\n", __func__);
1372 goto _exit;
1373 }
1374 status = d40_chan_has_events(d40c);
1375 status = (status & D40_EVENTLINE_MASK(event)) >>
1376 D40_EVENTLINE_POS(event);
1377
1378 if (status != D40_DMA_RUN)
1379 is_paused = true;
1380
1381 /* Resume the other logical channels if any */
1382 if (d40_chan_has_events(d40c))
1383 res = d40_channel_execute_command(d40c,
1384 D40_DMA_RUN);
1385
1386_exit:
1387 spin_unlock_irqrestore(&d40c->lock, flags);
1388 return is_paused;
1389
1390}
1391
1392
Linus Walleij8d318a52010-03-30 15:33:42 +02001393static bool d40_tx_is_linked(struct d40_chan *d40c)
1394{
1395 bool is_link;
1396
1397 if (d40c->log_num != D40_PHY_CHAN)
1398 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1399 else
1400 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1401 d40c->phy_chan->num * D40_DREG_PCDELTA +
1402 D40_CHAN_REG_SDLNK) &
1403 D40_SREG_LNK_PHYS_LNK_MASK;
1404 return is_link;
1405}
1406
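/* Bytes left of the current transfer: remaining element count (ECNT) times the element width. */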
1407static u32 d40_residue(struct d40_chan *d40c)
1408{
1409 u32 num_elt;
1410
1411 if (d40c->log_num != D40_PHY_CHAN)
1412 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1413 >> D40_MEM_LCSP2_ECNT_POS;
1414 else
1415 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1416 d40c->phy_chan->num * D40_DREG_PCDELTA +
1417 D40_CHAN_REG_SDELT) &
1418 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1419 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1420}
1421
1422static int d40_resume(struct dma_chan *chan)
1423{
1424 struct d40_chan *d40c =
1425 container_of(chan, struct d40_chan, chan);
1426 int res = 0;
1427 unsigned long flags;
1428
1429 spin_lock_irqsave(&d40c->lock, flags);
1430
1431 if (d40c->log_num != D40_PHY_CHAN) {
1432 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1433 if (res)
1434 goto out;
1435
1436 /* If bytes left to transfer or linked tx resume job */
1437 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1438 d40_config_set_event(d40c, true);
1439 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1440 }
1441 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1442 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1443
1444out:
1445 spin_unlock_irqrestore(&d40c->lock, flags);
1446 return res;
1447}
1448
1449static u32 stedma40_residue(struct dma_chan *chan)
1450{
1451 struct d40_chan *d40c =
1452 container_of(chan, struct d40_chan, chan);
1453 u32 bytes_left;
1454 unsigned long flags;
1455
1456 spin_lock_irqsave(&d40c->lock, flags);
1457 bytes_left = d40_residue(d40c);
1458 spin_unlock_irqrestore(&d40c->lock, flags);
1459
1460 return bytes_left;
1461}
1462
1463/* Public DMA functions in addition to the DMA engine framework */
1464
1465int stedma40_set_psize(struct dma_chan *chan,
1466 int src_psize,
1467 int dst_psize)
1468{
1469 struct d40_chan *d40c =
1470 container_of(chan, struct d40_chan, chan);
1471 unsigned long flags;
1472
1473 spin_lock_irqsave(&d40c->lock, flags);
1474
1475 if (d40c->log_num != D40_PHY_CHAN) {
1476 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1477 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1478 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1479 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1480 goto out;
1481 }
1482
1483 if (src_psize == STEDMA40_PSIZE_PHY_1)
1484 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1485 else {
1486 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1487 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1488 D40_SREG_CFG_PSIZE_POS);
1489 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1490 }
1491
1492 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1493 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1494 else {
1495 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1496 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1497 D40_SREG_CFG_PSIZE_POS);
1498 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1499 }
1500out:
1501 spin_unlock_irqrestore(&d40c->lock, flags);
1502 return 0;
1503}
1504EXPORT_SYMBOL(stedma40_set_psize);
1505
1506struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1507 struct scatterlist *sgl_dst,
1508 struct scatterlist *sgl_src,
1509 unsigned int sgl_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001510 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001511{
1512 int res;
1513 struct d40_desc *d40d;
1514 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1515 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001516 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001517
Jonas Aaberg2a614342010-06-20 21:25:24 +00001518 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001519 d40d = d40_desc_get(d40c);
1520
1521 if (d40d == NULL)
1522 goto err;
1523
Linus Walleij8d318a52010-03-30 15:33:42 +02001524 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001525 d40d->lli_tx_len = d40d->lli_len;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001526 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001527
1528 if (d40c->log_num != D40_PHY_CHAN) {
Per Friden941b77a2010-06-20 21:24:45 +00001529 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1530 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1531
Linus Walleij8d318a52010-03-30 15:33:42 +02001532 if (sgl_len > 1)
1533 /*
1534 * Check if there is space available in lcla. If not,
 1535 * split the list into single-LLI chunks and run only in lcpa
1536 * space.
1537 */
1538 if (d40_lcla_id_get(d40c,
1539 &d40c->base->lcla_pool) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001540 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001541
1542 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1543 dev_err(&d40c->chan.dev->device,
1544 "[%s] Out of memory\n", __func__);
1545 goto err;
1546 }
1547
1548 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1549 sgl_src,
1550 sgl_len,
1551 d40d->lli_log.src,
1552 d40c->log_def.lcsp1,
1553 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001554 dma_flags & DMA_PREP_INTERRUPT,
Per Friden941b77a2010-06-20 21:24:45 +00001555 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001556 d40c->base->plat_data->llis_per_log);
1557
1558 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1559 sgl_dst,
1560 sgl_len,
1561 d40d->lli_log.dst,
1562 d40c->log_def.lcsp3,
1563 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001564 dma_flags & DMA_PREP_INTERRUPT,
Per Friden941b77a2010-06-20 21:24:45 +00001565 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001566 d40c->base->plat_data->llis_per_log);
1567
1568
1569 } else {
1570 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1571 dev_err(&d40c->chan.dev->device,
1572 "[%s] Out of memory\n", __func__);
1573 goto err;
1574 }
1575
1576 res = d40_phy_sg_to_lli(sgl_src,
1577 sgl_len,
1578 0,
1579 d40d->lli_phy.src,
1580 d40d->lli_phy.src_addr,
1581 d40c->src_def_cfg,
1582 d40c->dma_cfg.src_info.data_width,
1583 d40c->dma_cfg.src_info.psize,
1584 true);
1585
1586 if (res < 0)
1587 goto err;
1588
1589 res = d40_phy_sg_to_lli(sgl_dst,
1590 sgl_len,
1591 0,
1592 d40d->lli_phy.dst,
1593 d40d->lli_phy.dst_addr,
1594 d40c->dst_def_cfg,
1595 d40c->dma_cfg.dst_info.data_width,
1596 d40c->dma_cfg.dst_info.psize,
1597 true);
1598
1599 if (res < 0)
1600 goto err;
1601
1602 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1603 d40d->lli_pool.size, DMA_TO_DEVICE);
1604 }
1605
1606 dma_async_tx_descriptor_init(&d40d->txd, chan);
1607
1608 d40d->txd.tx_submit = d40_tx_submit;
1609
Jonas Aaberg2a614342010-06-20 21:25:24 +00001610 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001611
1612 return &d40d->txd;
1613err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001614 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001615 return NULL;
1616}
1617EXPORT_SYMBOL(stedma40_memcpy_sg);
1618
1619bool stedma40_filter(struct dma_chan *chan, void *data)
1620{
1621 struct stedma40_chan_cfg *info = data;
1622 struct d40_chan *d40c =
1623 container_of(chan, struct d40_chan, chan);
1624 int err;
1625
1626 if (data) {
1627 err = d40_validate_conf(d40c, info);
1628 if (!err)
1629 d40c->dma_cfg = *info;
1630 } else
1631 err = d40_config_memcpy(d40c);
1632
1633 return err == 0;
1634}
1635EXPORT_SYMBOL(stedma40_filter);
1636
1637/* DMA ENGINE functions */
1638static int d40_alloc_chan_resources(struct dma_chan *chan)
1639{
1640 int err;
1641 unsigned long flags;
1642 struct d40_chan *d40c =
1643 container_of(chan, struct d40_chan, chan);
Linus Walleijef1872e2010-06-20 21:24:52 +00001644 bool is_free_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02001645 spin_lock_irqsave(&d40c->lock, flags);
1646
1647 d40c->completed = chan->cookie = 1;
1648
1649 /*
1650 * If no dma configuration is set (channel_type == 0)
Linus Walleijef1872e2010-06-20 21:24:52 +00001651 * use default configuration (memcpy)
Linus Walleij8d318a52010-03-30 15:33:42 +02001652 */
1653 if (d40c->dma_cfg.channel_type == 0) {
1654 err = d40_config_memcpy(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001655 if (err) {
1656 dev_err(&d40c->chan.dev->device,
1657 "[%s] Failed to configure memcpy channel\n",
1658 __func__);
1659 goto fail;
1660 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001661 }
Linus Walleijef1872e2010-06-20 21:24:52 +00001662 is_free_phy = (d40c->phy_chan == NULL);
Linus Walleij8d318a52010-03-30 15:33:42 +02001663
1664 err = d40_allocate_channel(d40c);
1665 if (err) {
1666 dev_err(&d40c->chan.dev->device,
1667 "[%s] Failed to allocate channel\n", __func__);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001668 goto fail;
Linus Walleij8d318a52010-03-30 15:33:42 +02001669 }
1670
Linus Walleijef1872e2010-06-20 21:24:52 +00001671 /* Fill in basic CFG register values */
1672 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1673 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1674
1675 if (d40c->log_num != D40_PHY_CHAN) {
1676 d40_log_cfg(&d40c->dma_cfg,
1677 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1678
1679 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1680 d40c->lcpa = d40c->base->lcpa_base +
1681 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1682 else
1683 d40c->lcpa = d40c->base->lcpa_base +
1684 d40c->dma_cfg.dst_dev_type *
1685 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1686 }
1687
1688 /*
1689 * Only write channel configuration to the DMA if the physical
1690 * resource is free. In case of multiple logical channels
1691 * on the same physical resource, only the first write is necessary.
1692 */
1693 if (is_free_phy) {
1694 err = d40_config_write(d40c);
1695 if (err) {
1696 dev_err(&d40c->chan.dev->device,
1697 "[%s] Failed to configure channel\n",
1698 __func__);
1699 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001700 }
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001701fail:
Linus Walleij8d318a52010-03-30 15:33:42 +02001702 spin_unlock_irqrestore(&d40c->lock, flags);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001703 return err;
Linus Walleij8d318a52010-03-30 15:33:42 +02001704}
1705
1706static void d40_free_chan_resources(struct dma_chan *chan)
1707{
1708 struct d40_chan *d40c =
1709 container_of(chan, struct d40_chan, chan);
1710 int err;
1711 unsigned long flags;
1712
1713 spin_lock_irqsave(&d40c->lock, flags);
1714
1715 err = d40_free_dma(d40c);
1716
1717 if (err)
1718 dev_err(&d40c->chan.dev->device,
1719 "[%s] Failed to free channel\n", __func__);
1720 spin_unlock_irqrestore(&d40c->lock, flags);
1721}
1722
1723static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1724 dma_addr_t dst,
1725 dma_addr_t src,
1726 size_t size,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001727 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001728{
1729 struct d40_desc *d40d;
1730 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1731 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001732 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001733 int err = 0;
1734
Jonas Aaberg2a614342010-06-20 21:25:24 +00001735 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001736 d40d = d40_desc_get(d40c);
1737
1738 if (d40d == NULL) {
1739 dev_err(&d40c->chan.dev->device,
1740 "[%s] Descriptor is NULL\n", __func__);
1741 goto err;
1742 }
1743
Jonas Aaberg2a614342010-06-20 21:25:24 +00001744 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001745
1746 dma_async_tx_descriptor_init(&d40d->txd, chan);
1747
1748 d40d->txd.tx_submit = d40_tx_submit;
1749
1750 if (d40c->log_num != D40_PHY_CHAN) {
1751
1752 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1753 dev_err(&d40c->chan.dev->device,
1754 "[%s] Out of memory\n", __func__);
1755 goto err;
1756 }
1757 d40d->lli_len = 1;
Per Friden941b77a2010-06-20 21:24:45 +00001758 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001759
1760 d40_log_fill_lli(d40d->lli_log.src,
1761 src,
1762 size,
1763 0,
1764 d40c->log_def.lcsp1,
1765 d40c->dma_cfg.src_info.data_width,
1766 true, true);
1767
1768 d40_log_fill_lli(d40d->lli_log.dst,
1769 dst,
1770 size,
1771 0,
1772 d40c->log_def.lcsp3,
1773 d40c->dma_cfg.dst_info.data_width,
1774 true, true);
1775
1776 } else {
1777
1778 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1779 dev_err(&d40c->chan.dev->device,
1780 "[%s] Out of memory\n", __func__);
1781 goto err;
1782 }
1783
1784 err = d40_phy_fill_lli(d40d->lli_phy.src,
1785 src,
1786 size,
1787 d40c->dma_cfg.src_info.psize,
1788 0,
1789 d40c->src_def_cfg,
1790 true,
1791 d40c->dma_cfg.src_info.data_width,
1792 false);
1793 if (err)
1794 goto err_fill_lli;
1795
1796 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1797 dst,
1798 size,
1799 d40c->dma_cfg.dst_info.psize,
1800 0,
1801 d40c->dst_def_cfg,
1802 true,
1803 d40c->dma_cfg.dst_info.data_width,
1804 false);
1805
1806 if (err)
1807 goto err_fill_lli;
1808
1809 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1810 d40d->lli_pool.size, DMA_TO_DEVICE);
1811 }
1812
Jonas Aaberg2a614342010-06-20 21:25:24 +00001813 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001814 return &d40d->txd;
1815
1816err_fill_lli:
1817 dev_err(&d40c->chan.dev->device,
1818 "[%s] Failed filling in PHY LLI\n", __func__);
1819 d40_pool_lli_free(d40d);
1820err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001821 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001822 return NULL;
1823}
1824
1825static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1826 struct d40_chan *d40c,
1827 struct scatterlist *sgl,
1828 unsigned int sg_len,
1829 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001830 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001831{
1832 dma_addr_t dev_addr = 0;
1833 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001834
1835 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1836 dev_err(&d40c->chan.dev->device,
1837 "[%s] Out of memory\n", __func__);
1838 return -ENOMEM;
1839 }
1840
1841 d40d->lli_len = sg_len;
Per Friden941b77a2010-06-20 21:24:45 +00001842 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1843 d40d->lli_tx_len = d40d->lli_len;
1844 else
1845 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
Linus Walleij8d318a52010-03-30 15:33:42 +02001846
1847 if (sg_len > 1)
1848 /*
1849 * Check if there is space available in lcla.
1850 * If not, split list into 1-length and run only
1851 * in lcpa space.
1852 */
1853 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001854 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001855
Jonas Aaberg2a614342010-06-20 21:25:24 +00001856 if (direction == DMA_FROM_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001857 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001858 else if (direction == DMA_TO_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001859 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001860 else
Linus Walleij8d318a52010-03-30 15:33:42 +02001861 return -EINVAL;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001862
1863 total_size = d40_log_sg_to_dev(&d40c->lcla,
1864 sgl, sg_len,
1865 &d40d->lli_log,
1866 &d40c->log_def,
1867 d40c->dma_cfg.src_info.data_width,
1868 d40c->dma_cfg.dst_info.data_width,
1869 direction,
1870 dma_flags & DMA_PREP_INTERRUPT,
1871 dev_addr, d40d->lli_tx_len,
1872 d40c->base->plat_data->llis_per_log);
1873
Linus Walleij8d318a52010-03-30 15:33:42 +02001874 if (total_size < 0)
1875 return -EINVAL;
1876
1877 return 0;
1878}
1879
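/*
 * Prepare a slave scatter/gather job for a physical channel: allocate
 * LLIs, resolve the device address from the transfer direction, build
 * the src and dst physical LLI chains and map them for the DMAC.
 */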
1880static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1881 struct d40_chan *d40c,
1882 struct scatterlist *sgl,
1883 unsigned int sgl_len,
1884 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001885 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001886{
1887 dma_addr_t src_dev_addr;
1888 dma_addr_t dst_dev_addr;
1889 int res;
1890
1891 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1892 dev_err(&d40c->chan.dev->device,
1893 "[%s] Out of memory\n", __func__);
1894 return -ENOMEM;
1895 }
1896
1897 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001898 d40d->lli_tx_len = sgl_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02001899
1900 if (direction == DMA_FROM_DEVICE) {
1901 dst_dev_addr = 0;
1902 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1903 } else if (direction == DMA_TO_DEVICE) {
1904 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1905 src_dev_addr = 0;
1906 } else
1907 return -EINVAL;
1908
1909 res = d40_phy_sg_to_lli(sgl,
1910 sgl_len,
1911 src_dev_addr,
1912 d40d->lli_phy.src,
1913 d40d->lli_phy.src_addr,
1914 d40c->src_def_cfg,
1915 d40c->dma_cfg.src_info.data_width,
1916 d40c->dma_cfg.src_info.psize,
1917 true);
1918 if (res < 0)
1919 return res;
1920
1921 res = d40_phy_sg_to_lli(sgl,
1922 sgl_len,
1923 dst_dev_addr,
1924 d40d->lli_phy.dst,
1925 d40d->lli_phy.dst_addr,
1926 d40c->dst_def_cfg,
1927 d40c->dma_cfg.dst_info.data_width,
1928 d40c->dma_cfg.dst_info.psize,
1929 true);
1930 if (res < 0)
1931 return res;
1932
1933 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1934 d40d->lli_pool.size, DMA_TO_DEVICE);
1935 return 0;
1936}
1937
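/*
 * dmaengine device_prep_slave_sg entry point. Runs the optional
 * pre_transfer hook, takes a free descriptor and hands the job to the
 * logical or physical prepare helper depending on the channel type.
 */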
1938static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1939 struct scatterlist *sgl,
1940 unsigned int sg_len,
1941 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001942 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001943{
1944 struct d40_desc *d40d;
1945 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1946 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001947 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001948 int err;
1949
1950 if (d40c->dma_cfg.pre_transfer)
1951 d40c->dma_cfg.pre_transfer(chan,
1952 d40c->dma_cfg.pre_transfer_data,
1953 sg_dma_len(sgl));
1954
Jonas Aaberg2a614342010-06-20 21:25:24 +00001955 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001956 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001957 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001958
1959 if (d40d == NULL)
1960 return NULL;
1961
1962 memset(d40d, 0, sizeof(struct d40_desc));
1963
1964 if (d40c->log_num != D40_PHY_CHAN)
1965 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001966 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001967 else
1968 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001969 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001970 if (err) {
1971 dev_err(&d40c->chan.dev->device,
1972 "[%s] Failed to prepare %s slave sg job: %d\n",
1973 __func__,
1974 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
1975 return NULL;
1976 }
1977
Jonas Aaberg2a614342010-06-20 21:25:24 +00001978 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001979
1980 dma_async_tx_descriptor_init(&d40d->txd, chan);
1981
1982 d40d->txd.tx_submit = d40_tx_submit;
1983
1984 return &d40d->txd;
1985}
1986
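/*
 * dmaengine device_tx_status entry point. Reports DMA_PAUSED if the
 * channel is paused, otherwise the cookie based completion state,
 * together with the residue of the current transfer.
 */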
1987static enum dma_status d40_tx_status(struct dma_chan *chan,
1988 dma_cookie_t cookie,
1989 struct dma_tx_state *txstate)
1990{
1991 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1992 dma_cookie_t last_used;
1993 dma_cookie_t last_complete;
1994 int ret;
1995
1996 last_complete = d40c->completed;
1997 last_used = chan->cookie;
1998
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001999 if (d40_is_paused(d40c))
2000 ret = DMA_PAUSED;
2001 else
2002 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002003
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002004 dma_set_tx_state(txstate, last_complete, last_used,
2005 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002006
2007 return ret;
2008}
2009
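/*
 * dmaengine device_issue_pending entry point. Starts the queued jobs
 * unless the channel is already busy processing.
 */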
2010static void d40_issue_pending(struct dma_chan *chan)
2011{
2012 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2013 unsigned long flags;
2014
2015 spin_lock_irqsave(&d40c->lock, flags);
2016
2017 /* Busy means that pending jobs are already being processed */
2018 if (!d40c->busy)
2019 (void) d40_queue_start(d40c);
2020
2021 spin_unlock_irqrestore(&d40c->lock, flags);
2022}
2023
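/*
 * dmaengine device_control entry point. Handles DMA_TERMINATE_ALL,
 * DMA_PAUSE and DMA_RESUME; all other commands return -ENXIO.
 */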
Linus Walleij05827632010-05-17 16:30:42 -07002024static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2025 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002026{
2027 unsigned long flags;
2028 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2029
2030 switch (cmd) {
2031 case DMA_TERMINATE_ALL:
2032 spin_lock_irqsave(&d40c->lock, flags);
2033 d40_term_all(d40c);
2034 spin_unlock_irqrestore(&d40c->lock, flags);
2035 return 0;
2036 case DMA_PAUSE:
2037 return d40_pause(chan);
2038 case DMA_RESUME:
2039 return d40_resume(chan);
2040 }
2041
2042 /* Other commands are unimplemented */
2043 return -ENXIO;
2044}
2045
2046/* Initialization functions */
2047
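/*
 * Initialize a range of channels and add them to the channel list of
 * the given dma_device.
 */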
2048static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2049 struct d40_chan *chans, int offset,
2050 int num_chans)
2051{
2052 int i = 0;
2053 struct d40_chan *d40c;
2054
2055 INIT_LIST_HEAD(&dma->channels);
2056
2057 for (i = offset; i < offset + num_chans; i++) {
2058 d40c = &chans[i];
2059 d40c->base = base;
2060 d40c->chan.device = dma;
2061
2062 /* Invalidate lcla element */
2063 d40c->lcla.src_id = -1;
2064 d40c->lcla.dst_id = -1;
2065
2066 spin_lock_init(&d40c->lock);
2067
2068 d40c->log_num = D40_PHY_CHAN;
2069
Linus Walleij8d318a52010-03-30 15:33:42 +02002070 INIT_LIST_HEAD(&d40c->active);
2071 INIT_LIST_HEAD(&d40c->queue);
2072 INIT_LIST_HEAD(&d40c->client);
2073
Linus Walleij8d318a52010-03-30 15:33:42 +02002074 tasklet_init(&d40c->tasklet, dma_tasklet,
2075 (unsigned long) d40c);
2076
2077 list_add_tail(&d40c->chan.device_node,
2078 &dma->channels);
2079 }
2080}
2081
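/*
 * Register the three dmaengine devices exposed by the driver: the
 * slave-only logical channels, the memcpy-only logical channels and
 * the physical channels capable of both.
 */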
2082static int __init d40_dmaengine_init(struct d40_base *base,
2083 int num_reserved_chans)
2084{
 2085	int err;
2086
2087 d40_chan_init(base, &base->dma_slave, base->log_chans,
2088 0, base->num_log_chans);
2089
2090 dma_cap_zero(base->dma_slave.cap_mask);
2091 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2092
2093 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2094 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2095 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2096 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2097 base->dma_slave.device_tx_status = d40_tx_status;
2098 base->dma_slave.device_issue_pending = d40_issue_pending;
2099 base->dma_slave.device_control = d40_control;
2100 base->dma_slave.dev = base->dev;
2101
2102 err = dma_async_device_register(&base->dma_slave);
2103
2104 if (err) {
2105 dev_err(base->dev,
2106 "[%s] Failed to register slave channels\n",
2107 __func__);
2108 goto failure1;
2109 }
2110
2111 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2112 base->num_log_chans, base->plat_data->memcpy_len);
2113
2114 dma_cap_zero(base->dma_memcpy.cap_mask);
2115 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2116
2117 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2118 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2119 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2120 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2121 base->dma_memcpy.device_tx_status = d40_tx_status;
2122 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2123 base->dma_memcpy.device_control = d40_control;
2124 base->dma_memcpy.dev = base->dev;
2125 /*
 2126	 * This controller can only access addresses at even
 2127	 * 32-bit boundaries, i.e. 2^2
2128 */
2129 base->dma_memcpy.copy_align = 2;
2130
2131 err = dma_async_device_register(&base->dma_memcpy);
2132
2133 if (err) {
2134 dev_err(base->dev,
 2135			"[%s] Failed to register memcpy only channels\n",
2136 __func__);
2137 goto failure2;
2138 }
2139
2140 d40_chan_init(base, &base->dma_both, base->phy_chans,
2141 0, num_reserved_chans);
2142
2143 dma_cap_zero(base->dma_both.cap_mask);
2144 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2145 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2146
2147 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2148 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2149 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2150 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2151 base->dma_both.device_tx_status = d40_tx_status;
2152 base->dma_both.device_issue_pending = d40_issue_pending;
2153 base->dma_both.device_control = d40_control;
2154 base->dma_both.dev = base->dev;
2155 base->dma_both.copy_align = 2;
2156 err = dma_async_device_register(&base->dma_both);
2157
2158 if (err) {
2159 dev_err(base->dev,
2160 "[%s] Failed to register logical and physical capable channels\n",
2161 __func__);
2162 goto failure3;
2163 }
2164 return 0;
2165failure3:
2166 dma_async_device_unregister(&base->dma_memcpy);
2167failure2:
2168 dma_async_device_unregister(&base->dma_slave);
2169failure1:
2170 return err;
2171}
2172
2173/* Hardware detection and initialization functions. */
2174
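/*
 * Set up the physical channel resources: mark secure mode channels as
 * occupied, count the channels available to the driver and check the
 * extended vs standard channel type settings.
 */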
2175static int __init d40_phy_res_init(struct d40_base *base)
2176{
2177 int i;
2178 int num_phy_chans_avail = 0;
2179 u32 val[2];
2180 int odd_even_bit = -2;
2181
2182 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2183 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2184
2185 for (i = 0; i < base->num_phy_chans; i++) {
2186 base->phy_res[i].num = i;
2187 odd_even_bit += 2 * ((i % 2) == 0);
2188 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2189 /* Mark security only channels as occupied */
2190 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2191 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2192 } else {
2193 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2194 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2195 num_phy_chans_avail++;
2196 }
2197 spin_lock_init(&base->phy_res[i].lock);
2198 }
2199 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2200 num_phy_chans_avail, base->num_phy_chans);
2201
2202 /* Verify settings extended vs standard */
2203 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2204
2205 for (i = 0; i < base->num_phy_chans; i++) {
2206
2207 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2208 (val[0] & 0x3) != 1)
2209 dev_info(base->dev,
2210 "[%s] INFO: channel %d is misconfigured (%d)\n",
2211 __func__, i, val[0] & 0x3);
2212
2213 val[0] = val[0] >> 2;
2214 }
2215
2216 return num_phy_chans_avail;
2217}
2218
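/*
 * Detect the DMAC hardware: get and enable the clock, map the register
 * base, verify the peripheral and PrimeCell IDs, read out the number
 * of physical channels and allocate the d40_base bookkeeping
 * structures.
 */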
2219static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2220{
2221 static const struct d40_reg_val dma_id_regs[] = {
2222 /* Peripheral Id */
2223 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2224 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2225 /*
2226 * D40_DREG_PERIPHID2 Depends on HW revision:
2227 * MOP500/HREF ED has 0x0008,
2228 * ? has 0x0018,
2229 * HREF V1 has 0x0028
2230 */
2231 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2232
2233 /* PCell Id */
2234 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2235 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2236 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2237 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2238 };
2239 struct stedma40_platform_data *plat_data;
2240 struct clk *clk = NULL;
2241 void __iomem *virtbase = NULL;
2242 struct resource *res = NULL;
2243 struct d40_base *base = NULL;
2244 int num_log_chans = 0;
2245 int num_phy_chans;
2246 int i;
2247
2248 clk = clk_get(&pdev->dev, NULL);
2249
2250 if (IS_ERR(clk)) {
2251 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2252 __func__);
2253 goto failure;
2254 }
2255
2256 clk_enable(clk);
2257
2258 /* Get IO for DMAC base address */
2259 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2260 if (!res)
2261 goto failure;
2262
2263 if (request_mem_region(res->start, resource_size(res),
2264 D40_NAME " I/O base") == NULL)
2265 goto failure;
2266
2267 virtbase = ioremap(res->start, resource_size(res));
2268 if (!virtbase)
2269 goto failure;
2270
2271 /* HW version check */
2272 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2273 if (dma_id_regs[i].val !=
2274 readl(virtbase + dma_id_regs[i].reg)) {
2275 dev_err(&pdev->dev,
2276 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2277 __func__,
2278 dma_id_regs[i].val,
2279 dma_id_regs[i].reg,
2280 readl(virtbase + dma_id_regs[i].reg));
2281 goto failure;
2282 }
2283 }
2284
2285 i = readl(virtbase + D40_DREG_PERIPHID2);
2286
2287 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2288 dev_err(&pdev->dev,
2289 "[%s] Unknown designer! Got %x wanted %x\n",
2290 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2291 goto failure;
2292 }
2293
2294 /* The number of physical channels on this HW */
2295 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2296
2297 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2298 (i >> 4) & 0xf, res->start);
2299
2300 plat_data = pdev->dev.platform_data;
2301
2302 /* Count the number of logical channels in use */
2303 for (i = 0; i < plat_data->dev_len; i++)
2304 if (plat_data->dev_rx[i] != 0)
2305 num_log_chans++;
2306
2307 for (i = 0; i < plat_data->dev_len; i++)
2308 if (plat_data->dev_tx[i] != 0)
2309 num_log_chans++;
2310
2311 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2312 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2313 sizeof(struct d40_chan), GFP_KERNEL);
2314
2315 if (base == NULL) {
2316 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2317 goto failure;
2318 }
2319
2320 base->clk = clk;
2321 base->num_phy_chans = num_phy_chans;
2322 base->num_log_chans = num_log_chans;
2323 base->phy_start = res->start;
2324 base->phy_size = resource_size(res);
2325 base->virtbase = virtbase;
2326 base->plat_data = plat_data;
2327 base->dev = &pdev->dev;
2328 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2329 base->log_chans = &base->phy_chans[num_phy_chans];
2330
2331 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2332 GFP_KERNEL);
2333 if (!base->phy_res)
2334 goto failure;
2335
2336 base->lookup_phy_chans = kzalloc(num_phy_chans *
2337 sizeof(struct d40_chan *),
2338 GFP_KERNEL);
2339 if (!base->lookup_phy_chans)
2340 goto failure;
2341
2342 if (num_log_chans + plat_data->memcpy_len) {
2343 /*
 2344	 * The max number of logical channels is the number of event lines
 2345	 * for all src and dst devices
2346 */
2347 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2348 sizeof(struct d40_chan *),
2349 GFP_KERNEL);
2350 if (!base->lookup_log_chans)
2351 goto failure;
2352 }
2353 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2354 GFP_KERNEL);
2355 if (!base->lcla_pool.alloc_map)
2356 goto failure;
2357
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002358 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2359 0, SLAB_HWCACHE_ALIGN,
2360 NULL);
2361 if (base->desc_slab == NULL)
2362 goto failure;
2363
Linus Walleij8d318a52010-03-30 15:33:42 +02002364 return base;
2365
2366failure:
2367 if (clk) {
2368 clk_disable(clk);
2369 clk_put(clk);
2370 }
2371 if (virtbase)
2372 iounmap(virtbase);
2373 if (res)
2374 release_mem_region(res->start,
2375 resource_size(res));
2378
2379 if (base) {
2380 kfree(base->lcla_pool.alloc_map);
2381 kfree(base->lookup_log_chans);
2382 kfree(base->lookup_phy_chans);
2383 kfree(base->phy_res);
2384 kfree(base);
2385 }
2386
2387 return NULL;
2388}
2389
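/*
 * Program the initial register setup: ungate all DMAC clocks, enable
 * interrupts on the logical channels and put every non-secure physical
 * channel in physical mode with its interrupt enabled.
 */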
2390static void __init d40_hw_init(struct d40_base *base)
2391{
2392
2393 static const struct d40_reg_val dma_init_reg[] = {
2394 /* Clock every part of the DMA block from start */
2395 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2396
2397 /* Interrupts on all logical channels */
2398 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2399 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2400 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2401 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2402 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2403 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2404 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2405 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2406 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2407 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2408 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2409 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2410 };
2411 int i;
2412 u32 prmseo[2] = {0, 0};
2413 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2414 u32 pcmis = 0;
2415 u32 pcicr = 0;
2416
2417 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2418 writel(dma_init_reg[i].val,
2419 base->virtbase + dma_init_reg[i].reg);
2420
2421 /* Configure all our dma channels to default settings */
2422 for (i = 0; i < base->num_phy_chans; i++) {
2423
2424 activeo[i % 2] = activeo[i % 2] << 2;
2425
2426 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2427 == D40_ALLOC_PHY) {
2428 activeo[i % 2] |= 3;
2429 continue;
2430 }
2431
 2432		/* Enable the interrupt for this physical channel */
2433 pcmis = (pcmis << 1) | 1;
2434
 2435		/* Clear the interrupt for this physical channel */
2436 pcicr = (pcicr << 1) | 1;
2437
2438 /* Set channel to physical mode */
2439 prmseo[i % 2] = prmseo[i % 2] << 2;
2440 prmseo[i % 2] |= 1;
2441
2442 }
2443
2444 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2445 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2446 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2447 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2448
2449 /* Write which interrupt to enable */
2450 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2451
2452 /* Write which interrupt to clear */
2453 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2454
2455}
2456
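/*
 * Platform driver probe: detect the hardware, map the LCPA and LCLA
 * regions, request the interrupt, register the dmaengine devices and
 * program the default hardware setup.
 */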
2457static int __init d40_probe(struct platform_device *pdev)
2458{
2459 int err;
2460 int ret = -ENOENT;
2461 struct d40_base *base;
2462 struct resource *res = NULL;
2463 int num_reserved_chans;
2464 u32 val;
2465
2466 base = d40_hw_detect_init(pdev);
2467
2468 if (!base)
2469 goto failure;
2470
2471 num_reserved_chans = d40_phy_res_init(base);
2472
2473 platform_set_drvdata(pdev, base);
2474
2475 spin_lock_init(&base->interrupt_lock);
2476 spin_lock_init(&base->execmd_lock);
2477
2478 /* Get IO for logical channel parameter address */
2479 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2480 if (!res) {
2481 ret = -ENOENT;
2482 dev_err(&pdev->dev,
2483 "[%s] No \"lcpa\" memory resource\n",
2484 __func__);
2485 goto failure;
2486 }
2487 base->lcpa_size = resource_size(res);
2488 base->phy_lcpa = res->start;
2489
2490 if (request_mem_region(res->start, resource_size(res),
2491 D40_NAME " I/O lcpa") == NULL) {
2492 ret = -EBUSY;
2493 dev_err(&pdev->dev,
2494 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2495 __func__, res->start, res->end);
2496 goto failure;
2497 }
2498
 2499	/* The LCPA is placed in ESRAM; warn if the DMAC already points elsewhere. */
2500 val = readl(base->virtbase + D40_DREG_LCPA);
2501 if (res->start != val && val != 0) {
2502 dev_warn(&pdev->dev,
2503 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2504 __func__, val, res->start);
2505 } else
2506 writel(res->start, base->virtbase + D40_DREG_LCPA);
2507
2508 base->lcpa_base = ioremap(res->start, resource_size(res));
2509 if (!base->lcpa_base) {
2510 ret = -ENOMEM;
2511 dev_err(&pdev->dev,
2512 "[%s] Failed to ioremap LCPA region\n",
2513 __func__);
2514 goto failure;
2515 }
2516 /* Get IO for logical channel link address */
2517 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2518 if (!res) {
2519 ret = -ENOENT;
2520 dev_err(&pdev->dev,
2521 "[%s] No \"lcla\" resource defined\n",
2522 __func__);
2523 goto failure;
2524 }
2525
2526 base->lcla_pool.base_size = resource_size(res);
2527 base->lcla_pool.phy = res->start;
2528
2529 if (request_mem_region(res->start, resource_size(res),
2530 D40_NAME " I/O lcla") == NULL) {
2531 ret = -EBUSY;
2532 dev_err(&pdev->dev,
2533 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2534 __func__, res->start, res->end);
2535 goto failure;
2536 }
2537 val = readl(base->virtbase + D40_DREG_LCLA);
2538 if (res->start != val && val != 0) {
2539 dev_warn(&pdev->dev,
2540 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2541 __func__, val, res->start);
2542 } else
2543 writel(res->start, base->virtbase + D40_DREG_LCLA);
2544
2545 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2546 if (!base->lcla_pool.base) {
2547 ret = -ENOMEM;
2548 dev_err(&pdev->dev,
2549 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2550 __func__, res->start, res->end);
2551 goto failure;
2552 }
2553
2554 spin_lock_init(&base->lcla_pool.lock);
2555
2556 base->lcla_pool.num_blocks = base->num_phy_chans;
2557
2558 base->irq = platform_get_irq(pdev, 0);
2559
2560 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2561
2562 if (ret) {
2563 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2564 goto failure;
2565 }
2566
2567 err = d40_dmaengine_init(base, num_reserved_chans);
2568 if (err)
2569 goto failure;
2570
2571 d40_hw_init(base);
2572
2573 dev_info(base->dev, "initialized\n");
2574 return 0;
2575
2576failure:
2577 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002578 if (base->desc_slab)
2579 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002580 if (base->virtbase)
2581 iounmap(base->virtbase);
2582 if (base->lcla_pool.phy)
2583 release_mem_region(base->lcla_pool.phy,
2584 base->lcla_pool.base_size);
2585 if (base->phy_lcpa)
2586 release_mem_region(base->phy_lcpa,
2587 base->lcpa_size);
2588 if (base->phy_start)
2589 release_mem_region(base->phy_start,
2590 base->phy_size);
2591 if (base->clk) {
2592 clk_disable(base->clk);
2593 clk_put(base->clk);
2594 }
2595
2596 kfree(base->lcla_pool.alloc_map);
2597 kfree(base->lookup_log_chans);
2598 kfree(base->lookup_phy_chans);
2599 kfree(base->phy_res);
2600 kfree(base);
2601 }
2602
2603 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2604 return ret;
2605}
2606
2607static struct platform_driver d40_driver = {
2608 .driver = {
2609 .owner = THIS_MODULE,
2610 .name = D40_NAME,
2611 },
2612};
2613
2614int __init stedma40_init(void)
2615{
2616 return platform_driver_probe(&d40_driver, d40_probe);
2617}
2618arch_initcall(stedma40_init);