 1/*
 2 * drivers/dma/ste_dma40.c
 3 *
 4 * Copyright (C) ST-Ericsson 2007-2010
 5 * License terms: GNU General Public License (GPL) version 2
 6 * Author: Per Friden <per.friden@stericsson.com>
 7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 8 *
 9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
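/*
 * Illustration (derived from the macros above and the register access code
 * below): physical channel 5 lives in the "odd" ACTIVO register and its
 * 2-bit command/status field starts at bit D40_CHAN_POS(5) == 4, selected
 * by D40_CHAN_POS_MASK(5) == 0x30.
 */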
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
 37/* Hardware designer of the block */
38#define D40_PERIPHID2_DESIGNER 0x8
39
40/**
 41 * enum d40_command - The different commands and/or statuses.
 42 *
 43 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 44 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
45 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
46 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
47 */
48enum d40_command {
49 D40_DMA_STOP = 0,
50 D40_DMA_RUN = 1,
51 D40_DMA_SUSPEND_REQ = 2,
52 D40_DMA_SUSPENDED = 3
53};
54
55/**
56 * struct d40_lli_pool - Structure for keeping LLIs in memory
57 *
 58 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 59 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
60 * pre_alloc_lli is used.
61 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
62 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
63 * one buffer to one buffer.
64 */
65struct d40_lli_pool {
66 void *base;
67 int size;
68 /* Space for dst and src, plus an extra for padding */
69 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
70};
71
72/**
73 * struct d40_desc - A descriptor is one DMA job.
74 *
 75 * @lli_phy: LLI settings for physical channel. Both src and dst
 76 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 77 * lli_len equals one.
 78 * @lli_log: Same as above but for logical channels.
 79 * @lli_pool: The pool with two entries pre-allocated.
 80 * @lli_len: Number of LLIs of the current descriptor.
 81 * @lli_count: Number of transferred LLIs.
 82 * @lli_tx_len: Max number of LLIs per transfer; there can be
 83 * many transfers for one descriptor.
 84 * @txd: DMA engine struct. Used, among other things, for communication
 85 * during a transfer.
 86 * @node: List entry.
 87 * @dir: The transfer direction of this job.
 88 * @is_in_client_list: true if the client owns this descriptor.
89 *
90 * This descriptor is used for both logical and physical transfers.
91 */
92
93struct d40_desc {
94 /* LLI physical */
95 struct d40_phy_lli_bidir lli_phy;
96 /* LLI logical */
97 struct d40_log_lli_bidir lli_log;
98
99 struct d40_lli_pool lli_pool;
 100 int lli_len;
 101 int lli_count;
 102 u32 lli_tx_len;
 103
104 struct dma_async_tx_descriptor txd;
105 struct list_head node;
106
107 enum dma_data_direction dir;
108 bool is_in_client_list;
109};
110
111/**
112 * struct d40_lcla_pool - LCLA pool settings and data.
113 *
114 * @base: The virtual address of LCLA.
115 * @phy: Physical base address of LCLA.
 116 * @base_size: Size of the LCLA area.
 117 * @lock: Lock to protect the content in this struct.
 118 * @alloc_map: Mapping between physical channel and LCLA entries.
 119 * @num_blocks: The number of entries in alloc_map. Equals the
 120 * number of physical channels.
121 */
122struct d40_lcla_pool {
123 void *base;
124 dma_addr_t phy;
125 resource_size_t base_size;
126 spinlock_t lock;
127 u32 *alloc_map;
128 int num_blocks;
129};
130
131/**
132 * struct d40_phy_res - struct for handling eventlines mapped to physical
133 * channels.
134 *
 135 * @lock: A lock protecting this entity.
 136 * @num: The physical channel number of this entity.
 137 * @allocated_src: Bit map showing which src event lines are mapped to
 138 * this physical channel. Can also be free or physically allocated.
 139 * @allocated_dst: Same as for src but for dst.
 140 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 141 * the event line number. allocated_src and allocated_dst cannot both be
 142 * allocated to a physical channel, since the interrupt handler would then
 143 * have no way of figuring out which one the interrupt belongs to.
144 */
145struct d40_phy_res {
146 spinlock_t lock;
147 int num;
148 u32 allocated_src;
149 u32 allocated_dst;
150};
151
152struct d40_base;
153
154/**
155 * struct d40_chan - Struct that describes a channel.
156 *
157 * @lock: A spinlock to protect this struct.
 158 * @log_num: The logical channel number, if any, of this channel.
 159 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 160 * current cookie.
 161 * @pending_tx: The number of pending transfers. Used between interrupt handler
 162 * and tasklet.
 163 * @busy: Set to true when transfer is ongoing on this channel.
 164 * @phy_chan: Pointer to the physical channel this instance runs on. If this
 165 * pointer is NULL, then the channel is not allocated.
 166 * @chan: DMA engine handle.
 167 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 168 * transfer and call the client callback.
 169 * @client: Client owned descriptor list.
 170 * @active: Active descriptor.
 171 * @queue: Queued jobs.
 172 * @dma_cfg: The client configuration of this dma channel.
 173 * @base: Pointer to the device instance struct.
 174 * @src_def_cfg: Default cfg register setting for src.
 175 * @dst_def_cfg: Default cfg register setting for dst.
 176 * @log_def: Default logical channel settings.
 177 * @lcla: Space for one dst/src pair for logical channel transfers.
178 * @lcpa: Pointer to dst and src lcpa settings.
179 *
180 * This struct can either "be" a logical or a physical channel.
181 */
182struct d40_chan {
183 spinlock_t lock;
184 int log_num;
185 /* ID of the most recent completed transfer */
186 int completed;
187 int pending_tx;
188 bool busy;
189 struct d40_phy_res *phy_chan;
190 struct dma_chan chan;
191 struct tasklet_struct tasklet;
192 struct list_head client;
193 struct list_head active;
194 struct list_head queue;
 195 struct stedma40_chan_cfg dma_cfg;
196 struct d40_base *base;
197 /* Default register configurations */
198 u32 src_def_cfg;
199 u32 dst_def_cfg;
200 struct d40_def_lcsp log_def;
201 struct d40_lcla_elem lcla;
202 struct d40_log_lli_full *lcpa;
203};
204
205/**
206 * struct d40_base - The big global struct, one for each probe'd instance.
207 *
 208 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
209 * @execmd_lock: Lock for execute command usage since several channels share
210 * the same physical register.
211 * @dev: The device structure.
 212 * @virtbase: The virtual base address of the DMA's registers.
213 * @clk: Pointer to the DMA clock structure.
214 * @phy_start: Physical memory start of the DMA registers.
215 * @phy_size: Size of the DMA register map.
216 * @irq: The IRQ number.
217 * @num_phy_chans: The number of physical channels. Read from HW. This
218 * is the number of available channels for this driver, not counting "Secure
219 * mode" allocated physical channels.
220 * @num_log_chans: The number of logical channels. Calculated from
221 * num_phy_chans.
222 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 223 * @dma_slave: dma_device channels that can only do slave transfers.
 224 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
225 * @phy_chans: Room for all possible physical channels in system.
226 * @log_chans: Room for all possible logical channels in system.
227 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
228 * to log_chans entries.
229 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
230 * to phy_chans entries.
231 * @plat_data: Pointer to provided platform_data which is the driver
232 * configuration.
233 * @phy_res: Vector containing all physical channels.
234 * @lcla_pool: lcla pool settings and data.
235 * @lcpa_base: The virtual mapped address of LCPA.
236 * @phy_lcpa: The physical address of the LCPA.
237 * @lcpa_size: The size of the LCPA area.
 238 * @desc_slab: cache for descriptors.
 239 */
240struct d40_base {
241 spinlock_t interrupt_lock;
242 spinlock_t execmd_lock;
243 struct device *dev;
244 void __iomem *virtbase;
245 struct clk *clk;
246 phys_addr_t phy_start;
247 resource_size_t phy_size;
248 int irq;
249 int num_phy_chans;
250 int num_log_chans;
251 struct dma_device dma_both;
252 struct dma_device dma_slave;
253 struct dma_device dma_memcpy;
254 struct d40_chan *phy_chans;
255 struct d40_chan *log_chans;
256 struct d40_chan **lookup_log_chans;
257 struct d40_chan **lookup_phy_chans;
258 struct stedma40_platform_data *plat_data;
259 /* Physical half channels */
260 struct d40_phy_res *phy_res;
261 struct d40_lcla_pool lcla_pool;
262 void *lcpa_base;
263 dma_addr_t phy_lcpa;
264 resource_size_t lcpa_size;
 265 struct kmem_cache *desc_slab;
 266};
267
268/**
269 * struct d40_interrupt_lookup - lookup table for interrupt handler
270 *
271 * @src: Interrupt mask register.
272 * @clr: Interrupt clear register.
273 * @is_error: true if this is an error interrupt.
 274 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 275 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
276 */
277struct d40_interrupt_lookup {
278 u32 src;
279 u32 clr;
280 bool is_error;
281 int offset;
282};
283
284/**
285 * struct d40_reg_val - simple lookup struct
286 *
287 * @reg: The register.
288 * @val: The value that belongs to the register in reg.
289 */
290struct d40_reg_val {
291 unsigned int reg;
292 unsigned int val;
293};
294
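/*
 * Allocate LLI space for one descriptor: the common one-src/one-dst case
 * fits in the pre_alloc_lli area, while longer jobs fall back to a
 * GFP_NOWAIT kmalloc that is then aligned to the LLI size.
 */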
295static int d40_pool_lli_alloc(struct d40_desc *d40d,
296 int lli_len, bool is_log)
297{
298 u32 align;
299 void *base;
300
301 if (is_log)
302 align = sizeof(struct d40_log_lli);
303 else
304 align = sizeof(struct d40_phy_lli);
305
306 if (lli_len == 1) {
307 base = d40d->lli_pool.pre_alloc_lli;
308 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
309 d40d->lli_pool.base = NULL;
310 } else {
311 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
312
313 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
314 d40d->lli_pool.base = base;
315
316 if (d40d->lli_pool.base == NULL)
317 return -ENOMEM;
318 }
319
320 if (is_log) {
321 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
322 align);
323 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
324 align);
325 } else {
326 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
327 align);
328 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
329 align);
330
331 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
332 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
333 }
334
335 return 0;
336}
337
338static void d40_pool_lli_free(struct d40_desc *d40d)
339{
340 kfree(d40d->lli_pool.base);
341 d40d->lli_pool.base = NULL;
342 d40d->lli_pool.size = 0;
343 d40d->lli_log.src = NULL;
344 d40d->lli_log.dst = NULL;
345 d40d->lli_phy.src = NULL;
346 d40d->lli_phy.dst = NULL;
347 d40d->lli_phy.src_addr = 0;
348 d40d->lli_phy.dst_addr = 0;
349}
350
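/* Hand out the next cookie for this channel; cookies stay positive and wrap back to 1 on overflow. */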
351static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
352 struct d40_desc *desc)
353{
354 dma_cookie_t cookie = d40c->chan.cookie;
355
356 if (++cookie < 0)
357 cookie = 1;
358
359 d40c->chan.cookie = cookie;
360 desc->txd.cookie = cookie;
361
362 return cookie;
363}
364
 365static void d40_desc_remove(struct d40_desc *d40d)
366{
367 list_del(&d40d->node);
368}
369
370static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
371{
 372 struct d40_desc *d;
373 struct d40_desc *_d;
374
375 if (!list_empty(&d40c->client)) {
376 list_for_each_entry_safe(d, _d, &d40c->client, node)
377 if (async_tx_test_ack(&d->txd)) {
378 d40_pool_lli_free(d);
379 d40_desc_remove(d);
 380 break;
 381 }
 382 } else {
 383 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
 384 if (d != NULL) {
 385 memset(d, 0, sizeof(struct d40_desc));
 386 INIT_LIST_HEAD(&d->node);
 387 }
 388 }
 389 return d;
 390}
 391
 392static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 393{
 394 kmem_cache_free(d40c->base->desc_slab, d40d);
 395}
396
397static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
398{
399 list_add_tail(&desc->node, &d40c->active);
400}
401
402static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
403{
404 struct d40_desc *d;
405
406 if (list_empty(&d40c->active))
407 return NULL;
408
409 d = list_first_entry(&d40c->active,
410 struct d40_desc,
411 node);
412 return d;
413}
414
415static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
416{
417 list_add_tail(&desc->node, &d40c->queue);
418}
419
420static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
421{
422 struct d40_desc *d;
423
424 if (list_empty(&d40c->queue))
425 return NULL;
426
427 d = list_first_entry(&d40c->queue,
428 struct d40_desc,
429 node);
430 return d;
431}
432
433/* Support functions for logical channels */
434
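/*
 * Reserve one src and one dst LCLA entry for this logical channel. The
 * per-physical-channel alloc_map word is used as a small bitmap, so more
 * than 32 blocks cannot be handled here.
 */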
435static int d40_lcla_id_get(struct d40_chan *d40c,
436 struct d40_lcla_pool *pool)
437{
438 int src_id = 0;
439 int dst_id = 0;
440 struct d40_log_lli *lcla_lidx_base =
441 pool->base + d40c->phy_chan->num * 1024;
442 int i;
443 int lli_per_log = d40c->base->plat_data->llis_per_log;
444
445 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
446 return 0;
447
448 if (pool->num_blocks > 32)
449 return -EINVAL;
450
451 spin_lock(&pool->lock);
452
453 for (i = 0; i < pool->num_blocks; i++) {
454 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
455 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
456 break;
457 }
458 }
459 src_id = i;
460 if (src_id >= pool->num_blocks)
461 goto err;
462
463 for (; i < pool->num_blocks; i++) {
464 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
465 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
466 break;
467 }
468 }
469
470 dst_id = i;
471 if (dst_id == src_id)
472 goto err;
473
474 d40c->lcla.src_id = src_id;
475 d40c->lcla.dst_id = dst_id;
476 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
477 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
478
479
480 spin_unlock(&pool->lock);
481 return 0;
482err:
483 spin_unlock(&pool->lock);
484 return -EINVAL;
485}
486
487static void d40_lcla_id_put(struct d40_chan *d40c,
488 struct d40_lcla_pool *pool,
489 int id)
490{
491 if (id < 0)
492 return;
493
494 d40c->lcla.src_id = -1;
495 d40c->lcla.dst_id = -1;
496
497 spin_lock(&pool->lock);
498 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
499 spin_unlock(&pool->lock);
500}
501
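/*
 * Issue a command (see enum d40_command) to a physical channel. Even
 * channels are controlled through the ACTIVE register, odd ones through
 * ACTIVO, two bits per channel. A SUSPEND_REQ is polled until the channel
 * reports SUSPENDED or STOP, or D40_SUSPEND_MAX_IT iterations have passed.
 */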
502static int d40_channel_execute_command(struct d40_chan *d40c,
503 enum d40_command command)
504{
505 int status, i;
506 void __iomem *active_reg;
507 int ret = 0;
508 unsigned long flags;
509
510 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
511
512 if (d40c->phy_chan->num % 2 == 0)
513 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
514 else
515 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
516
517 if (command == D40_DMA_SUSPEND_REQ) {
518 status = (readl(active_reg) &
519 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
520 D40_CHAN_POS(d40c->phy_chan->num);
521
522 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
523 goto done;
524 }
525
526 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
527
528 if (command == D40_DMA_SUSPEND_REQ) {
529
530 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
531 status = (readl(active_reg) &
532 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
533 D40_CHAN_POS(d40c->phy_chan->num);
534
535 cpu_relax();
536 /*
537 * Reduce the number of bus accesses while
538 * waiting for the DMA to suspend.
539 */
540 udelay(3);
541
542 if (status == D40_DMA_STOP ||
543 status == D40_DMA_SUSPENDED)
544 break;
545 }
546
547 if (i == D40_SUSPEND_MAX_IT) {
548 dev_err(&d40c->chan.dev->device,
549 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
550 __func__, d40c->phy_chan->num, d40c->log_num,
551 status);
552 dump_stack();
553 ret = -EBUSY;
554 }
555
556 }
557done:
558 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
559 return ret;
560}
561
562static void d40_term_all(struct d40_chan *d40c)
563{
564 struct d40_desc *d40d;
 565
566 /* Release active descriptors */
567 while ((d40d = d40_first_active_get(d40c))) {
568 d40_desc_remove(d40d);
569
570 /* Return desc to free-list */
571 d40_desc_free(d40c, d40d);
572 }
573
574 /* Release queued descriptors waiting for transfer */
575 while ((d40d = d40_first_queued(d40c))) {
576 d40_desc_remove(d40d);
577
578 /* Return desc to free-list */
579 d40_desc_free(d40c, d40d);
580 }
581
 582 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
583 d40c->lcla.src_id);
584 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
585 d40c->lcla.dst_id);
586
587 d40c->pending_tx = 0;
588 d40c->busy = false;
589}
590
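/*
 * Enable or disable the event line(s) this channel uses, depending on the
 * transfer direction: SSLNK for source events, SDLNK for destination events.
 */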
591static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
592{
593 u32 val;
594 unsigned long flags;
595
596 if (do_enable)
597 val = D40_ACTIVATE_EVENTLINE;
598 else
599 val = D40_DEACTIVATE_EVENTLINE;
600
601 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
602
603 /* Enable event line connected to device (or memcpy) */
604 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
605 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
606 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
607
608 writel((val << D40_EVENTLINE_POS(event)) |
609 ~D40_EVENTLINE_MASK(event),
610 d40c->base->virtbase + D40_DREG_PCBASE +
611 d40c->phy_chan->num * D40_DREG_PCDELTA +
612 D40_CHAN_REG_SSLNK);
613 }
614 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
615 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
616
617 writel((val << D40_EVENTLINE_POS(event)) |
618 ~D40_EVENTLINE_MASK(event),
619 d40c->base->virtbase + D40_DREG_PCBASE +
620 d40c->phy_chan->num * D40_DREG_PCDELTA +
621 D40_CHAN_REG_SDLNK);
622 }
623
624 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
625}
626
 627static u32 d40_chan_has_events(struct d40_chan *d40c)
 628{
629 u32 val = 0;
630
631 /* If SSLNK or SDLNK is zero all events are disabled */
632 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
633 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
634 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
635 d40c->phy_chan->num * D40_DREG_PCDELTA +
636 D40_CHAN_REG_SSLNK);
637
638 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
639 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
640 d40c->phy_chan->num * D40_DREG_PCDELTA +
641 D40_CHAN_REG_SDLNK);
 642 return val;
 643}
644
645static void d40_config_enable_lidx(struct d40_chan *d40c)
646{
647 /* Set LIDX for lcla */
648 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
649 D40_SREG_ELEM_LOG_LIDX_MASK,
650 d40c->base->virtbase + D40_DREG_PCBASE +
651 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
652
653 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
654 D40_SREG_ELEM_LOG_LIDX_MASK,
655 d40c->base->virtbase + D40_DREG_PCBASE +
656 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
657}
658
659static int d40_config_write(struct d40_chan *d40c)
660{
661 u32 addr_base;
662 u32 var;
663 int res;
664
665 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
666 if (res)
667 return res;
668
669 /* Odd addresses are even addresses + 4 */
670 addr_base = (d40c->phy_chan->num % 2) * 4;
671 /* Setup channel mode to logical or physical */
672 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
673 D40_CHAN_POS(d40c->phy_chan->num);
674 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
675
676 /* Setup operational mode option register */
677 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
678 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
679
680 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
681
682 if (d40c->log_num != D40_PHY_CHAN) {
683 /* Set default config for CFG reg */
684 writel(d40c->src_def_cfg,
685 d40c->base->virtbase + D40_DREG_PCBASE +
686 d40c->phy_chan->num * D40_DREG_PCDELTA +
687 D40_CHAN_REG_SSCFG);
688 writel(d40c->dst_def_cfg,
689 d40c->base->virtbase + D40_DREG_PCBASE +
690 d40c->phy_chan->num * D40_DREG_PCDELTA +
691 D40_CHAN_REG_SDCFG);
692
693 d40_config_enable_lidx(d40c);
694 }
695 return res;
696}
697
698static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
699{
700
701 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
702 d40_phy_lli_write(d40c->base->virtbase,
703 d40c->phy_chan->num,
704 d40d->lli_phy.dst,
705 d40d->lli_phy.src);
 706 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
 707 struct d40_log_lli *src = d40d->lli_log.src;
 708 struct d40_log_lli *dst = d40d->lli_log.dst;
 709
 710 src += d40d->lli_count;
 711 dst += d40d->lli_count;
 712 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
 713 d40c->lcla.dst,
 714 dst, src,
 715 d40c->base->plat_data->llis_per_log);
 716 }
 717 d40d->lli_count += d40d->lli_tx_len;
 718}
719
720static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
721{
722 struct d40_chan *d40c = container_of(tx->chan,
723 struct d40_chan,
724 chan);
725 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
726 unsigned long flags;
727
728 spin_lock_irqsave(&d40c->lock, flags);
729
730 tx->cookie = d40_assign_cookie(d40c, d40d);
731
732 d40_desc_queue(d40c, d40d);
733
734 spin_unlock_irqrestore(&d40c->lock, flags);
735
736 return tx->cookie;
737}
738
739static int d40_start(struct d40_chan *d40c)
740{
741 int err;
742
743 if (d40c->log_num != D40_PHY_CHAN) {
744 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
745 if (err)
746 return err;
747 d40_config_set_event(d40c, true);
748 }
749
750 err = d40_channel_execute_command(d40c, D40_DMA_RUN);
751
752 return err;
753}
754
755static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
756{
757 struct d40_desc *d40d;
758 int err;
759
760 /* Start queued jobs, if any */
761 d40d = d40_first_queued(d40c);
762
763 if (d40d != NULL) {
764 d40c->busy = true;
765
766 /* Remove from queue */
767 d40_desc_remove(d40d);
768
769 /* Add to active queue */
770 d40_desc_submit(d40c, d40d);
771
772 /* Initiate DMA job */
773 d40_desc_load(d40c, d40d);
774
775 /* Start dma job */
776 err = d40_start(d40c);
777
778 if (err)
779 return NULL;
780 }
781
782 return d40d;
783}
784
785/* called from interrupt context */
786static void dma_tc_handle(struct d40_chan *d40c)
787{
788 struct d40_desc *d40d;
789
790 if (!d40c->phy_chan)
791 return;
792
793 /* Get first active entry from list */
794 d40d = d40_first_active_get(d40c);
795
796 if (d40d == NULL)
797 return;
798
 799 if (d40d->lli_count < d40d->lli_len) {
 800
801 d40_desc_load(d40c, d40d);
802 /* Start dma job */
803 (void) d40_start(d40c);
804 return;
805 }
806
807 if (d40_queue_start(d40c) == NULL)
808 d40c->busy = false;
809
810 d40c->pending_tx++;
811 tasklet_schedule(&d40c->tasklet);
812
813}
814
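/*
 * Tasklet scheduled by dma_tc_handle(): completes the first active
 * descriptor, updates the completed cookie and invokes the client
 * callback outside of the channel lock.
 */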
815static void dma_tasklet(unsigned long data)
816{
817 struct d40_chan *d40c = (struct d40_chan *) data;
818 struct d40_desc *d40d_fin;
819 unsigned long flags;
820 dma_async_tx_callback callback;
821 void *callback_param;
822
823 spin_lock_irqsave(&d40c->lock, flags);
824
825 /* Get first active entry from list */
826 d40d_fin = d40_first_active_get(d40c);
827
828 if (d40d_fin == NULL)
829 goto err;
830
831 d40c->completed = d40d_fin->txd.cookie;
832
833 /*
834 * If terminating a channel pending_tx is set to zero.
 835 * This prevents any finished active jobs from returning to the client.
836 */
837 if (d40c->pending_tx == 0) {
838 spin_unlock_irqrestore(&d40c->lock, flags);
839 return;
840 }
841
842 /* Callback to client */
843 callback = d40d_fin->txd.callback;
844 callback_param = d40d_fin->txd.callback_param;
845
846 if (async_tx_test_ack(&d40d_fin->txd)) {
847 d40_pool_lli_free(d40d_fin);
848 d40_desc_remove(d40d_fin);
849 /* Return desc to free-list */
850 d40_desc_free(d40c, d40d_fin);
851 } else {
 852 if (!d40d_fin->is_in_client_list) {
853 d40_desc_remove(d40d_fin);
854 list_add_tail(&d40d_fin->node, &d40c->client);
855 d40d_fin->is_in_client_list = true;
856 }
857 }
858
859 d40c->pending_tx--;
860
861 if (d40c->pending_tx)
862 tasklet_schedule(&d40c->tasklet);
863
864 spin_unlock_irqrestore(&d40c->lock, flags);
865
866 if (callback)
867 callback(callback_param);
868
869 return;
870
871 err:
 872 /* Rescue manoeuvre if receiving double interrupts */
873 if (d40c->pending_tx > 0)
874 d40c->pending_tx--;
875 spin_unlock_irqrestore(&d40c->lock, flags);
876}
877
878static irqreturn_t d40_handle_interrupt(int irq, void *data)
879{
880 static const struct d40_interrupt_lookup il[] = {
881 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
882 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
883 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
884 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
885 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
886 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
887 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
888 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
889 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
890 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
891 };
892
893 int i;
894 u32 regs[ARRAY_SIZE(il)];
895 u32 tmp;
896 u32 idx;
897 u32 row;
898 long chan = -1;
899 struct d40_chan *d40c;
900 unsigned long flags;
901 struct d40_base *base = data;
902
903 spin_lock_irqsave(&base->interrupt_lock, flags);
904
905 /* Read interrupt status of both logical and physical channels */
906 for (i = 0; i < ARRAY_SIZE(il); i++)
907 regs[i] = readl(base->virtbase + il[i].src);
908
909 for (;;) {
910
911 chan = find_next_bit((unsigned long *)regs,
912 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
913
914 /* No more set bits found? */
915 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
916 break;
917
918 row = chan / BITS_PER_LONG;
919 idx = chan & (BITS_PER_LONG - 1);
920
921 /* ACK interrupt */
922 tmp = readl(base->virtbase + il[row].clr);
923 tmp |= 1 << idx;
924 writel(tmp, base->virtbase + il[row].clr);
925
926 if (il[row].offset == D40_PHY_CHAN)
927 d40c = base->lookup_phy_chans[idx];
928 else
929 d40c = base->lookup_log_chans[il[row].offset + idx];
930 spin_lock(&d40c->lock);
931
932 if (!il[row].is_error)
933 dma_tc_handle(d40c);
934 else
935 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
936 __func__, chan, il[row].offset, idx);
937
938 spin_unlock(&d40c->lock);
939 }
940
941 spin_unlock_irqrestore(&base->interrupt_lock, flags);
942
943 return IRQ_HANDLED;
944}
945
946
947static int d40_validate_conf(struct d40_chan *d40c,
948 struct stedma40_chan_cfg *conf)
949{
950 int res = 0;
951 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
952 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
953 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
954 == STEDMA40_CHANNEL_IN_LOG_MODE;
955
956 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
957 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
958 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
959 __func__);
960 res = -EINVAL;
961 }
962
963 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
964 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
965 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
966 __func__);
967 res = -EINVAL;
968 }
969
970 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
971 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
972 dev_err(&d40c->chan.dev->device,
973 "[%s] No event line\n", __func__);
974 res = -EINVAL;
975 }
976
977 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
978 (src_event_group != dst_event_group)) {
979 dev_err(&d40c->chan.dev->device,
980 "[%s] Invalid event group\n", __func__);
981 res = -EINVAL;
982 }
983
984 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
985 /*
986 * DMAC HW supports it. Will be added to this driver,
987 * in case any dma client requires it.
988 */
989 dev_err(&d40c->chan.dev->device,
990 "[%s] periph to periph not supported\n",
991 __func__);
992 res = -EINVAL;
993 }
994
995 return res;
996}
997
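/*
 * Try to reserve a physical channel (the whole channel for physical mode,
 * or a single event line on it for logical mode) by updating the
 * allocated_src/allocated_dst masks under the channel lock.
 * Returns true on success.
 */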
998static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
 999 int log_event_line, bool is_log)
 1000{
 1001 unsigned long flags;
 1002 spin_lock_irqsave(&phy->lock, flags);
 1003 if (!is_log) {
 1004 /* Physical interrupts are masked per physical full channel */
1005 if (phy->allocated_src == D40_ALLOC_FREE &&
1006 phy->allocated_dst == D40_ALLOC_FREE) {
1007 phy->allocated_dst = D40_ALLOC_PHY;
1008 phy->allocated_src = D40_ALLOC_PHY;
1009 goto found;
1010 } else
1011 goto not_found;
1012 }
1013
1014 /* Logical channel */
1015 if (is_src) {
1016 if (phy->allocated_src == D40_ALLOC_PHY)
1017 goto not_found;
1018
1019 if (phy->allocated_src == D40_ALLOC_FREE)
1020 phy->allocated_src = D40_ALLOC_LOG_FREE;
1021
1022 if (!(phy->allocated_src & (1 << log_event_line))) {
1023 phy->allocated_src |= 1 << log_event_line;
1024 goto found;
1025 } else
1026 goto not_found;
1027 } else {
1028 if (phy->allocated_dst == D40_ALLOC_PHY)
1029 goto not_found;
1030
1031 if (phy->allocated_dst == D40_ALLOC_FREE)
1032 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1033
1034 if (!(phy->allocated_dst & (1 << log_event_line))) {
1035 phy->allocated_dst |= 1 << log_event_line;
1036 goto found;
1037 } else
1038 goto not_found;
1039 }
1040
1041not_found:
1042 spin_unlock_irqrestore(&phy->lock, flags);
1043 return false;
1044found:
1045 spin_unlock_irqrestore(&phy->lock, flags);
1046 return true;
1047}
1048
1049static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1050 int log_event_line)
1051{
1052 unsigned long flags;
1053 bool is_free = false;
1054
1055 spin_lock_irqsave(&phy->lock, flags);
1056 if (!log_event_line) {
1057 /* Physical interrupts are masked per physical full channel */
1058 phy->allocated_dst = D40_ALLOC_FREE;
1059 phy->allocated_src = D40_ALLOC_FREE;
1060 is_free = true;
1061 goto out;
1062 }
1063
1064 /* Logical channel */
1065 if (is_src) {
1066 phy->allocated_src &= ~(1 << log_event_line);
1067 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1068 phy->allocated_src = D40_ALLOC_FREE;
1069 } else {
1070 phy->allocated_dst &= ~(1 << log_event_line);
1071 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1072 phy->allocated_dst = D40_ALLOC_FREE;
1073 }
1074
1075 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1076 D40_ALLOC_FREE);
1077
1078out:
1079 spin_unlock_irqrestore(&phy->lock, flags);
1080
1081 return is_free;
1082}
1083
1084static int d40_allocate_channel(struct d40_chan *d40c)
1085{
1086 int dev_type;
1087 int event_group;
1088 int event_line;
1089 struct d40_phy_res *phys;
1090 int i;
1091 int j;
1092 int log_num;
1093 bool is_src;
1094 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1095 == STEDMA40_CHANNEL_IN_LOG_MODE;
1096
1097
1098 phys = d40c->base->phy_res;
1099
1100 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1101 dev_type = d40c->dma_cfg.src_dev_type;
1102 log_num = 2 * dev_type;
1103 is_src = true;
1104 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1105 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1106 /* dst event lines are used for logical memcpy */
1107 dev_type = d40c->dma_cfg.dst_dev_type;
1108 log_num = 2 * dev_type + 1;
1109 is_src = false;
1110 } else
1111 return -EINVAL;
1112
1113 event_group = D40_TYPE_TO_GROUP(dev_type);
1114 event_line = D40_TYPE_TO_EVENT(dev_type);
1115
1116 if (!is_log) {
1117 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1118 /* Find physical half channel */
1119 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1120
 1121 if (d40_alloc_mask_set(&phys[i], is_src,
 1122 0, is_log))
 1123 goto found_phy;
1124 }
1125 } else
1126 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1127 int phy_num = j + event_group * 2;
1128 for (i = phy_num; i < phy_num + 2; i++) {
 1129 if (d40_alloc_mask_set(&phys[i], is_src,
 1130 0, is_log))
 1131 goto found_phy;
1132 }
1133 }
1134 return -EINVAL;
1135found_phy:
1136 d40c->phy_chan = &phys[i];
1137 d40c->log_num = D40_PHY_CHAN;
1138 goto out;
1139 }
1140 if (dev_type == -1)
1141 return -EINVAL;
1142
1143 /* Find logical channel */
1144 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1145 int phy_num = j + event_group * 2;
1146 /*
 1147 * Spread logical channels across all available physical channels
 1148 * rather than packing every logical channel onto the first
 1149 * available phy channel.
1150 */
1151 if (is_src) {
1152 for (i = phy_num; i < phy_num + 2; i++) {
1153 if (d40_alloc_mask_set(&phys[i], is_src,
 1154 event_line, is_log))
 1155 goto found_log;
1156 }
1157 } else {
1158 for (i = phy_num + 1; i >= phy_num; i--) {
1159 if (d40_alloc_mask_set(&phys[i], is_src,
 1160 event_line, is_log))
 1161 goto found_log;
1162 }
1163 }
1164 }
1165 return -EINVAL;
1166
1167found_log:
1168 d40c->phy_chan = &phys[i];
1169 d40c->log_num = log_num;
1170out:
1171
1172 if (is_log)
1173 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1174 else
1175 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1176
1177 return 0;
1178
1179}
1180
 1181static int d40_config_memcpy(struct d40_chan *d40c)
1182{
1183 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1184
1185 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1186 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1187 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1188 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1189 memcpy[d40c->chan.chan_id];
1190
1191 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1192 dma_has_cap(DMA_SLAVE, cap)) {
1193 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1194 } else {
1195 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1196 __func__);
1197 return -EINVAL;
1198 }
1199
1200 return 0;
1201}
1202
1203
1204static int d40_free_dma(struct d40_chan *d40c)
1205{
1206
1207 int res = 0;
1208 u32 event, dir;
1209 struct d40_phy_res *phy = d40c->phy_chan;
1210 bool is_src;
 1211 struct d40_desc *d;
 1212 struct d40_desc *_d;
 1213
 1214
 1215 /* Terminate all queued and active transfers */
 1216 d40_term_all(d40c);
 1217
 1218 /* Release client owned descriptors */
1219 if (!list_empty(&d40c->client))
1220 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1221 d40_pool_lli_free(d);
1222 d40_desc_remove(d);
1223 /* Return desc to free-list */
1224 d40_desc_free(d40c, d);
1225 }
1226
 1227 if (phy == NULL) {
1228 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1229 __func__);
1230 return -EINVAL;
1231 }
1232
1233 if (phy->allocated_src == D40_ALLOC_FREE &&
1234 phy->allocated_dst == D40_ALLOC_FREE) {
1235 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1236 __func__);
1237 return -EINVAL;
1238 }
1239
 1240 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 1241 if (res) {
 1242 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
 1243 __func__);
1244 return res;
1245 }
1246
1247 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1248 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1249 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1250 dir = D40_CHAN_REG_SDLNK;
1251 is_src = false;
1252 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1253 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1254 dir = D40_CHAN_REG_SSLNK;
1255 is_src = true;
1256 } else {
1257 dev_err(&d40c->chan.dev->device,
1258 "[%s] Unknown direction\n", __func__);
1259 return -EINVAL;
1260 }
1261
1262 if (d40c->log_num != D40_PHY_CHAN) {
1263 /*
1264 * Release logical channel, deactivate the event line during
1265 * the time physical res is suspended.
1266 */
1267 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1268 D40_EVENTLINE_MASK(event),
1269 d40c->base->virtbase + D40_DREG_PCBASE +
1270 phy->num * D40_DREG_PCDELTA + dir);
1271
1272 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1273
1274 /*
 1275 * Check if there are more logical allocations
 1276 * on this phy channel.
1277 */
1278 if (!d40_alloc_mask_free(phy, is_src, event)) {
1279 /* Resume the other logical channels if any */
1280 if (d40_chan_has_events(d40c)) {
1281 res = d40_channel_execute_command(d40c,
1282 D40_DMA_RUN);
1283 if (res) {
1284 dev_err(&d40c->chan.dev->device,
1285 "[%s] Executing RUN command\n",
1286 __func__);
1287 return res;
1288 }
1289 }
1290 return 0;
1291 }
1292 } else
1293 d40_alloc_mask_free(phy, is_src, 0);
1294
1295 /* Release physical channel */
1296 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1297 if (res) {
1298 dev_err(&d40c->chan.dev->device,
1299 "[%s] Failed to stop channel\n", __func__);
1300 return res;
1301 }
1302 d40c->phy_chan = NULL;
1303 /* Invalidate channel type */
1304 d40c->dma_cfg.channel_type = 0;
1305 d40c->base->lookup_phy_chans[phy->num] = NULL;
1306
1307 return 0;
 1308}
1309
1310static int d40_pause(struct dma_chan *chan)
1311{
1312 struct d40_chan *d40c =
1313 container_of(chan, struct d40_chan, chan);
1314 int res;
 1315 unsigned long flags;
1316
1317 spin_lock_irqsave(&d40c->lock, flags);
1318
1319 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1320 if (res == 0) {
1321 if (d40c->log_num != D40_PHY_CHAN) {
1322 d40_config_set_event(d40c, false);
1323 /* Resume the other logical channels if any */
1324 if (d40_chan_has_events(d40c))
1325 res = d40_channel_execute_command(d40c,
1326 D40_DMA_RUN);
1327 }
1328 }
1329
1330 spin_unlock_irqrestore(&d40c->lock, flags);
1331 return res;
1332}
1333
 1334static bool d40_is_paused(struct d40_chan *d40c)
1335{
1336 bool is_paused = false;
1337 unsigned long flags;
1338 void __iomem *active_reg;
1339 u32 status;
1340 u32 event;
1341 int res;
1342
1343 spin_lock_irqsave(&d40c->lock, flags);
1344
1345 if (d40c->log_num == D40_PHY_CHAN) {
1346 if (d40c->phy_chan->num % 2 == 0)
1347 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1348 else
1349 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1350
1351 status = (readl(active_reg) &
1352 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1353 D40_CHAN_POS(d40c->phy_chan->num);
1354 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1355 is_paused = true;
1356
1357 goto _exit;
1358 }
1359
1360 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1361 if (res != 0)
1362 goto _exit;
1363
1364 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1365 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1366 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1367 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1368 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1369 else {
1370 dev_err(&d40c->chan.dev->device,
1371 "[%s] Unknown direction\n", __func__);
1372 goto _exit;
1373 }
1374 status = d40_chan_has_events(d40c);
1375 status = (status & D40_EVENTLINE_MASK(event)) >>
1376 D40_EVENTLINE_POS(event);
1377
1378 if (status != D40_DMA_RUN)
1379 is_paused = true;
1380
1381 /* Resume the other logical channels if any */
1382 if (d40_chan_has_events(d40c))
1383 res = d40_channel_execute_command(d40c,
1384 D40_DMA_RUN);
1385
1386_exit:
1387 spin_unlock_irqrestore(&d40c->lock, flags);
1388 return is_paused;
1389
1390}
1391
1392
 1393static bool d40_tx_is_linked(struct d40_chan *d40c)
1394{
1395 bool is_link;
1396
1397 if (d40c->log_num != D40_PHY_CHAN)
1398 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1399 else
1400 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1401 d40c->phy_chan->num * D40_DREG_PCDELTA +
1402 D40_CHAN_REG_SDLNK) &
1403 D40_SREG_LNK_PHYS_LNK_MASK;
1404 return is_link;
1405}
1406
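/*
 * Remaining bytes of the current transfer: the element counter left in the
 * channel registers (or in LCPA for logical channels) times the element width.
 */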
1407static u32 d40_residue(struct d40_chan *d40c)
1408{
1409 u32 num_elt;
1410
1411 if (d40c->log_num != D40_PHY_CHAN)
1412 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1413 >> D40_MEM_LCSP2_ECNT_POS;
1414 else
1415 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1416 d40c->phy_chan->num * D40_DREG_PCDELTA +
1417 D40_CHAN_REG_SDELT) &
1418 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1419 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1420}
1421
1422static int d40_resume(struct dma_chan *chan)
1423{
1424 struct d40_chan *d40c =
1425 container_of(chan, struct d40_chan, chan);
1426 int res = 0;
1427 unsigned long flags;
1428
1429 spin_lock_irqsave(&d40c->lock, flags);
1430
1431 if (d40c->log_num != D40_PHY_CHAN) {
1432 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1433 if (res)
1434 goto out;
1435
1436 /* If bytes left to transfer or linked tx resume job */
1437 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1438 d40_config_set_event(d40c, true);
1439 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1440 }
1441 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1442 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1443
1444out:
1445 spin_unlock_irqrestore(&d40c->lock, flags);
1446 return res;
1447}
1448
1449static u32 stedma40_residue(struct dma_chan *chan)
1450{
1451 struct d40_chan *d40c =
1452 container_of(chan, struct d40_chan, chan);
1453 u32 bytes_left;
1454 unsigned long flags;
1455
1456 spin_lock_irqsave(&d40c->lock, flags);
1457 bytes_left = d40_residue(d40c);
1458 spin_unlock_irqrestore(&d40c->lock, flags);
1459
1460 return bytes_left;
1461}
1462
1463/* Public DMA functions in addition to the DMA engine framework */
1464
1465int stedma40_set_psize(struct dma_chan *chan,
1466 int src_psize,
1467 int dst_psize)
1468{
1469 struct d40_chan *d40c =
1470 container_of(chan, struct d40_chan, chan);
1471 unsigned long flags;
1472
1473 spin_lock_irqsave(&d40c->lock, flags);
1474
1475 if (d40c->log_num != D40_PHY_CHAN) {
1476 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1477 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1478 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1479 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1480 goto out;
1481 }
1482
1483 if (src_psize == STEDMA40_PSIZE_PHY_1)
1484 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1485 else {
1486 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1487 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1488 D40_SREG_CFG_PSIZE_POS);
1489 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1490 }
1491
1492 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1493 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1494 else {
1495 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1496 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1497 D40_SREG_CFG_PSIZE_POS);
1498 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1499 }
1500out:
1501 spin_unlock_irqrestore(&d40c->lock, flags);
1502 return 0;
1503}
1504EXPORT_SYMBOL(stedma40_set_psize);
1505
1506struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1507 struct scatterlist *sgl_dst,
1508 struct scatterlist *sgl_src,
1509 unsigned int sgl_len,
 1510 unsigned long dma_flags)
 1511{
 1512 int res;
 1513 struct d40_desc *d40d;
 1514 struct d40_chan *d40c = container_of(chan, struct d40_chan,
 1515 chan);
 1516 unsigned long flags;
 1517
 1518 if (d40c->phy_chan == NULL) {
1519 dev_err(&d40c->chan.dev->device,
1520 "[%s] Unallocated channel.\n", __func__);
1521 return ERR_PTR(-EINVAL);
1522 }
1523
 1524 spin_lock_irqsave(&d40c->lock, flags);
 1525 d40d = d40_desc_get(d40c);
 1526
 1527 if (d40d == NULL)
 1528 goto err;
 1529
 1530 d40d->lli_len = sgl_len;
 1531 d40d->lli_tx_len = d40d->lli_len;
 1532 d40d->txd.flags = dma_flags;
 1533
 1534 if (d40c->log_num != D40_PHY_CHAN) {
 1535 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
 1536 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
 1537
 1538 if (sgl_len > 1)
1539 /*
1540 * Check if there is space available in lcla. If not,
1541 * split list into 1-length and run only in lcpa
1542 * space.
1543 */
1544 if (d40_lcla_id_get(d40c,
1545 &d40c->base->lcla_pool) != 0)
 1546 d40d->lli_tx_len = 1;
 1547
1548 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1549 dev_err(&d40c->chan.dev->device,
1550 "[%s] Out of memory\n", __func__);
1551 goto err;
1552 }
1553
1554 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1555 sgl_src,
1556 sgl_len,
1557 d40d->lli_log.src,
1558 d40c->log_def.lcsp1,
1559 d40c->dma_cfg.src_info.data_width,
 1560 dma_flags & DMA_PREP_INTERRUPT,
 1561 d40d->lli_tx_len,
 1562 d40c->base->plat_data->llis_per_log);
1563
1564 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1565 sgl_dst,
1566 sgl_len,
1567 d40d->lli_log.dst,
1568 d40c->log_def.lcsp3,
1569 d40c->dma_cfg.dst_info.data_width,
 1570 dma_flags & DMA_PREP_INTERRUPT,
 1571 d40d->lli_tx_len,
 1572 d40c->base->plat_data->llis_per_log);
1573
1574
1575 } else {
1576 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1577 dev_err(&d40c->chan.dev->device,
1578 "[%s] Out of memory\n", __func__);
1579 goto err;
1580 }
1581
1582 res = d40_phy_sg_to_lli(sgl_src,
1583 sgl_len,
1584 0,
1585 d40d->lli_phy.src,
1586 d40d->lli_phy.src_addr,
1587 d40c->src_def_cfg,
1588 d40c->dma_cfg.src_info.data_width,
1589 d40c->dma_cfg.src_info.psize,
1590 true);
1591
1592 if (res < 0)
1593 goto err;
1594
1595 res = d40_phy_sg_to_lli(sgl_dst,
1596 sgl_len,
1597 0,
1598 d40d->lli_phy.dst,
1599 d40d->lli_phy.dst_addr,
1600 d40c->dst_def_cfg,
1601 d40c->dma_cfg.dst_info.data_width,
1602 d40c->dma_cfg.dst_info.psize,
1603 true);
1604
1605 if (res < 0)
1606 goto err;
1607
1608 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1609 d40d->lli_pool.size, DMA_TO_DEVICE);
1610 }
1611
1612 dma_async_tx_descriptor_init(&d40d->txd, chan);
1613
1614 d40d->txd.tx_submit = d40_tx_submit;
1615
 1616 spin_unlock_irqrestore(&d40c->lock, flags);
 1617
 1618 return &d40d->txd;
 1619err:
 1620 spin_unlock_irqrestore(&d40c->lock, flags);
 1621 return NULL;
1622}
1623EXPORT_SYMBOL(stedma40_memcpy_sg);
1624
1625bool stedma40_filter(struct dma_chan *chan, void *data)
1626{
1627 struct stedma40_chan_cfg *info = data;
1628 struct d40_chan *d40c =
1629 container_of(chan, struct d40_chan, chan);
1630 int err;
1631
1632 if (data) {
1633 err = d40_validate_conf(d40c, info);
1634 if (!err)
1635 d40c->dma_cfg = *info;
1636 } else
1637 err = d40_config_memcpy(d40c);
1638
1639 return err == 0;
1640}
1641EXPORT_SYMBOL(stedma40_filter);
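/*
 * Minimal usage sketch (illustration only, not part of this driver): a
 * client would typically request a channel through the dmaengine core and
 * pass its stedma40_chan_cfg to stedma40_filter, roughly like:
 *
 *     struct stedma40_chan_cfg cfg = { ... };
 *     dma_cap_mask_t mask;
 *     struct dma_chan *chan;
 *
 *     dma_cap_zero(mask);
 *     dma_cap_set(DMA_SLAVE, mask);
 *     chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * The exact cfg fields depend on the platform data for the device in use.
 */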
1642
1643/* DMA ENGINE functions */
1644static int d40_alloc_chan_resources(struct dma_chan *chan)
1645{
1646 int err;
1647 unsigned long flags;
1648 struct d40_chan *d40c =
1649 container_of(chan, struct d40_chan, chan);
 1650 bool is_free_phy;
 1651 spin_lock_irqsave(&d40c->lock, flags);
 1652
 1653 d40c->completed = chan->cookie = 1;
 1654
 1655 /*
 1656 * If no dma configuration is set (channel_type == 0)
 1657 * use default configuration (memcpy)
 1658 */
 1659 if (d40c->dma_cfg.channel_type == 0) {
 1660 err = d40_config_memcpy(d40c);
 1661 if (err) {
1662 dev_err(&d40c->chan.dev->device,
1663 "[%s] Failed to configure memcpy channel\n",
1664 __func__);
1665 goto fail;
1666 }
 1667 }
 1668 is_free_phy = (d40c->phy_chan == NULL);
 1669
 1670 err = d40_allocate_channel(d40c);
 1671 if (err) {
 1672 dev_err(&d40c->chan.dev->device,
 1673 "[%s] Failed to allocate channel\n", __func__);
 1674 goto fail;
 1675 }
 1676
 1677 /* Fill in basic CFG register values */
1678 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1679 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1680
1681 if (d40c->log_num != D40_PHY_CHAN) {
1682 d40_log_cfg(&d40c->dma_cfg,
1683 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1684
1685 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1686 d40c->lcpa = d40c->base->lcpa_base +
1687 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1688 else
1689 d40c->lcpa = d40c->base->lcpa_base +
1690 d40c->dma_cfg.dst_dev_type *
1691 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1692 }
1693
1694 /*
1695 * Only write channel configuration to the DMA if the physical
1696 * resource is free. In case of multiple logical channels
1697 * on the same physical resource, only the first write is necessary.
1698 */
1699 if (is_free_phy) {
1700 err = d40_config_write(d40c);
1701 if (err) {
1702 dev_err(&d40c->chan.dev->device,
1703 "[%s] Failed to configure channel\n",
1704 __func__);
1705 }
 1706 }
 1707fail:
 1708 spin_unlock_irqrestore(&d40c->lock, flags);
 1709 return err;
 1710}
1711
1712static void d40_free_chan_resources(struct dma_chan *chan)
1713{
1714 struct d40_chan *d40c =
1715 container_of(chan, struct d40_chan, chan);
1716 int err;
1717 unsigned long flags;
1718
 1719 if (d40c->phy_chan == NULL) {
1720 dev_err(&d40c->chan.dev->device,
1721 "[%s] Cannot free unallocated channel\n", __func__);
1722 return;
1723 }
1724
1725
 1726 spin_lock_irqsave(&d40c->lock, flags);
1727
1728 err = d40_free_dma(d40c);
1729
1730 if (err)
1731 dev_err(&d40c->chan.dev->device,
1732 "[%s] Failed to free channel\n", __func__);
1733 spin_unlock_irqrestore(&d40c->lock, flags);
1734}
1735
1736static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1737 dma_addr_t dst,
1738 dma_addr_t src,
1739 size_t size,
 1740 unsigned long dma_flags)
 1741{
 1742 struct d40_desc *d40d;
 1743 struct d40_chan *d40c = container_of(chan, struct d40_chan,
 1744 chan);
 1745 unsigned long flags;
 1746 int err = 0;
 1747
 1748 if (d40c->phy_chan == NULL) {
1749 dev_err(&d40c->chan.dev->device,
1750 "[%s] Channel is not allocated.\n", __func__);
1751 return ERR_PTR(-EINVAL);
1752 }
1753
 1754 spin_lock_irqsave(&d40c->lock, flags);
 1755 d40d = d40_desc_get(d40c);
1756
1757 if (d40d == NULL) {
1758 dev_err(&d40c->chan.dev->device,
1759 "[%s] Descriptor is NULL\n", __func__);
1760 goto err;
1761 }
1762
 1763 d40d->txd.flags = dma_flags;
 1764
1765 dma_async_tx_descriptor_init(&d40d->txd, chan);
1766
1767 d40d->txd.tx_submit = d40_tx_submit;
1768
1769 if (d40c->log_num != D40_PHY_CHAN) {
1770
1771 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1772 dev_err(&d40c->chan.dev->device,
1773 "[%s] Out of memory\n", __func__);
1774 goto err;
1775 }
1776 d40d->lli_len = 1;
 1777 d40d->lli_tx_len = 1;
 1778
1779 d40_log_fill_lli(d40d->lli_log.src,
1780 src,
1781 size,
1782 0,
1783 d40c->log_def.lcsp1,
1784 d40c->dma_cfg.src_info.data_width,
1785 true, true);
1786
1787 d40_log_fill_lli(d40d->lli_log.dst,
1788 dst,
1789 size,
1790 0,
1791 d40c->log_def.lcsp3,
1792 d40c->dma_cfg.dst_info.data_width,
1793 true, true);
1794
1795 } else {
1796
1797 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1798 dev_err(&d40c->chan.dev->device,
1799 "[%s] Out of memory\n", __func__);
1800 goto err;
1801 }
1802
1803 err = d40_phy_fill_lli(d40d->lli_phy.src,
1804 src,
1805 size,
1806 d40c->dma_cfg.src_info.psize,
1807 0,
1808 d40c->src_def_cfg,
1809 true,
1810 d40c->dma_cfg.src_info.data_width,
1811 false);
1812 if (err)
1813 goto err_fill_lli;
1814
1815 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1816 dst,
1817 size,
1818 d40c->dma_cfg.dst_info.psize,
1819 0,
1820 d40c->dst_def_cfg,
1821 true,
1822 d40c->dma_cfg.dst_info.data_width,
1823 false);
1824
1825 if (err)
1826 goto err_fill_lli;
1827
1828 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1829 d40d->lli_pool.size, DMA_TO_DEVICE);
1830 }
1831
 1832 spin_unlock_irqrestore(&d40c->lock, flags);
 1833 return &d40d->txd;
1834
1835err_fill_lli:
1836 dev_err(&d40c->chan.dev->device,
1837 "[%s] Failed filling in PHY LLI\n", __func__);
1838 d40_pool_lli_free(d40d);
1839err:
 1840 spin_unlock_irqrestore(&d40c->lock, flags);
 1841 return NULL;
1842}
1843
1844static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1845 struct d40_chan *d40c,
1846 struct scatterlist *sgl,
1847 unsigned int sg_len,
1848 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001849 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001850{
1851 dma_addr_t dev_addr = 0;
1852 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001853
1854 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1855 dev_err(&d40c->chan.dev->device,
1856 "[%s] Out of memory\n", __func__);
1857 return -ENOMEM;
1858 }
1859
1860 d40d->lli_len = sg_len;
Per Friden941b77a2010-06-20 21:24:45 +00001861 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1862 d40d->lli_tx_len = d40d->lli_len;
1863 else
1864 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
Linus Walleij8d318a52010-03-30 15:33:42 +02001865
1866 if (sg_len > 1)
1867 /*
1868 * Check if there is space available in lcla.
 1869			 * If not, run the list one LLI at a time using
 1870			 * only the lcpa space.
1871 */
1872 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001873 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001874
Jonas Aaberg2a614342010-06-20 21:25:24 +00001875 if (direction == DMA_FROM_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001876 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001877 else if (direction == DMA_TO_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001878 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001879 else
Linus Walleij8d318a52010-03-30 15:33:42 +02001880 return -EINVAL;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001881
1882 total_size = d40_log_sg_to_dev(&d40c->lcla,
1883 sgl, sg_len,
1884 &d40d->lli_log,
1885 &d40c->log_def,
1886 d40c->dma_cfg.src_info.data_width,
1887 d40c->dma_cfg.dst_info.data_width,
1888 direction,
1889 dma_flags & DMA_PREP_INTERRUPT,
1890 dev_addr, d40d->lli_tx_len,
1891 d40c->base->plat_data->llis_per_log);
1892
Linus Walleij8d318a52010-03-30 15:33:42 +02001893 if (total_size < 0)
1894 return -EINVAL;
1895
1896 return 0;
1897}
1898
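/*
 * d40_prep_slave_sg_phy - map a scatterlist onto physical channel LLIs.
 *
 * Fills both the src and dst LLI chains and maps the LLI area for the
 * hardware with dma_map_single(). Only DMA_TO_DEVICE and
 * DMA_FROM_DEVICE are supported.
 */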
1899static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1900 struct d40_chan *d40c,
1901 struct scatterlist *sgl,
1902 unsigned int sgl_len,
1903 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001904 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001905{
1906 dma_addr_t src_dev_addr;
1907 dma_addr_t dst_dev_addr;
1908 int res;
1909
1910 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1911 dev_err(&d40c->chan.dev->device,
1912 "[%s] Out of memory\n", __func__);
1913 return -ENOMEM;
1914 }
1915
1916 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001917 d40d->lli_tx_len = sgl_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02001918
1919 if (direction == DMA_FROM_DEVICE) {
1920 dst_dev_addr = 0;
1921 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1922 } else if (direction == DMA_TO_DEVICE) {
1923 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1924 src_dev_addr = 0;
1925 } else
1926 return -EINVAL;
1927
1928 res = d40_phy_sg_to_lli(sgl,
1929 sgl_len,
1930 src_dev_addr,
1931 d40d->lli_phy.src,
1932 d40d->lli_phy.src_addr,
1933 d40c->src_def_cfg,
1934 d40c->dma_cfg.src_info.data_width,
1935 d40c->dma_cfg.src_info.psize,
1936 true);
1937 if (res < 0)
1938 return res;
1939
1940 res = d40_phy_sg_to_lli(sgl,
1941 sgl_len,
1942 dst_dev_addr,
1943 d40d->lli_phy.dst,
1944 d40d->lli_phy.dst_addr,
1945 d40c->dst_def_cfg,
1946 d40c->dma_cfg.dst_info.data_width,
1947 d40c->dma_cfg.dst_info.psize,
1948 true);
1949 if (res < 0)
1950 return res;
1951
1952 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1953 d40d->lli_pool.size, DMA_TO_DEVICE);
1954 return 0;
1955}
1956
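/*
 * d40_prep_slave_sg - dmaengine device_prep_slave_sg hook.
 *
 * Runs the optional pre_transfer callback, picks up a free descriptor
 * and delegates to the logical or physical variant depending on the
 * channel type.
 *
 * Illustrative client usage (sketch only):
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						   DMA_TO_DEVICE,
 *						   DMA_PREP_INTERRUPT);
 */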
1957static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1958 struct scatterlist *sgl,
1959 unsigned int sg_len,
1960 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001961 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001962{
1963 struct d40_desc *d40d;
1964 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1965 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001966 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001967 int err;
1968
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001969 if (d40c->phy_chan == NULL) {
1970 dev_err(&d40c->chan.dev->device,
1971 "[%s] Cannot prepare unallocated channel\n", __func__);
1972 return ERR_PTR(-EINVAL);
1973 }
1974
Linus Walleij8d318a52010-03-30 15:33:42 +02001975 if (d40c->dma_cfg.pre_transfer)
1976 d40c->dma_cfg.pre_transfer(chan,
1977 d40c->dma_cfg.pre_transfer_data,
1978 sg_dma_len(sgl));
1979
Jonas Aaberg2a614342010-06-20 21:25:24 +00001980 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001981 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001982 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001983
1984 if (d40d == NULL)
1985 return NULL;
1986
1987 memset(d40d, 0, sizeof(struct d40_desc));
1988
1989 if (d40c->log_num != D40_PHY_CHAN)
1990 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001991 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001992 else
1993 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001994 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001995 if (err) {
1996 dev_err(&d40c->chan.dev->device,
1997 "[%s] Failed to prepare %s slave sg job: %d\n",
1998 __func__,
1999 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2000 return NULL;
2001 }
2002
Jonas Aaberg2a614342010-06-20 21:25:24 +00002003 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002004
2005 dma_async_tx_descriptor_init(&d40d->txd, chan);
2006
2007 d40d->txd.tx_submit = d40_tx_submit;
2008
2009 return &d40d->txd;
2010}
2011
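/*
 * d40_tx_status - dmaengine device_tx_status hook.
 *
 * Reports DMA_PAUSED when the channel is paused, otherwise the usual
 * cookie comparison, and fills in the residue via stedma40_residue().
 */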
2012static enum dma_status d40_tx_status(struct dma_chan *chan,
2013 dma_cookie_t cookie,
2014 struct dma_tx_state *txstate)
2015{
2016 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2017 dma_cookie_t last_used;
2018 dma_cookie_t last_complete;
2019 int ret;
2020
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002021 if (d40c->phy_chan == NULL) {
2022 dev_err(&d40c->chan.dev->device,
2023 "[%s] Cannot read status of unallocated channel\n",
2024 __func__);
 2025		return DMA_ERROR;
2026 }
2027
Linus Walleij8d318a52010-03-30 15:33:42 +02002028 last_complete = d40c->completed;
2029 last_used = chan->cookie;
2030
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002031 if (d40_is_paused(d40c))
2032 ret = DMA_PAUSED;
2033 else
2034 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002035
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002036 dma_set_tx_state(txstate, last_complete, last_used,
2037 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002038
2039 return ret;
2040}
2041
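/*
 * d40_issue_pending - dmaengine device_issue_pending hook.
 *
 * Starts execution of the queued jobs unless the channel is already
 * busy processing its queue.
 */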
2042static void d40_issue_pending(struct dma_chan *chan)
2043{
2044 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2045 unsigned long flags;
2046
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002047 if (d40c->phy_chan == NULL) {
2048 dev_err(&d40c->chan.dev->device,
2049 "[%s] Channel is not allocated!\n", __func__);
2050 return;
2051 }
2052
Linus Walleij8d318a52010-03-30 15:33:42 +02002053 spin_lock_irqsave(&d40c->lock, flags);
2054
2055 /* Busy means that pending jobs are already being processed */
2056 if (!d40c->busy)
2057 (void) d40_queue_start(d40c);
2058
2059 spin_unlock_irqrestore(&d40c->lock, flags);
2060}
2061
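/*
 * d40_control - dmaengine device_control hook.
 *
 * Implements DMA_TERMINATE_ALL, DMA_PAUSE and DMA_RESUME; all other
 * commands return -ENXIO.
 */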
Linus Walleij05827632010-05-17 16:30:42 -07002062static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2063 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002064{
2065 unsigned long flags;
2066 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2067
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002068 if (d40c->phy_chan == NULL) {
2069 dev_err(&d40c->chan.dev->device,
2070 "[%s] Channel is not allocated!\n", __func__);
2071 return -EINVAL;
2072 }
2073
Linus Walleij8d318a52010-03-30 15:33:42 +02002074 switch (cmd) {
2075 case DMA_TERMINATE_ALL:
2076 spin_lock_irqsave(&d40c->lock, flags);
2077 d40_term_all(d40c);
2078 spin_unlock_irqrestore(&d40c->lock, flags);
2079 return 0;
2080 case DMA_PAUSE:
2081 return d40_pause(chan);
2082 case DMA_RESUME:
2083 return d40_resume(chan);
2084 }
2085
2086 /* Other commands are unimplemented */
2087 return -ENXIO;
2088}
2089
2090/* Initialization functions */
2091
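/*
 * d40_chan_init - initialize a range of channel structures and add
 * them to the channel list of the given dma_device.
 */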
2092static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2093 struct d40_chan *chans, int offset,
2094 int num_chans)
2095{
2096 int i = 0;
2097 struct d40_chan *d40c;
2098
2099 INIT_LIST_HEAD(&dma->channels);
2100
2101 for (i = offset; i < offset + num_chans; i++) {
2102 d40c = &chans[i];
2103 d40c->base = base;
2104 d40c->chan.device = dma;
2105
2106 /* Invalidate lcla element */
2107 d40c->lcla.src_id = -1;
2108 d40c->lcla.dst_id = -1;
2109
2110 spin_lock_init(&d40c->lock);
2111
2112 d40c->log_num = D40_PHY_CHAN;
2113
Linus Walleij8d318a52010-03-30 15:33:42 +02002114 INIT_LIST_HEAD(&d40c->active);
2115 INIT_LIST_HEAD(&d40c->queue);
2116 INIT_LIST_HEAD(&d40c->client);
2117
Linus Walleij8d318a52010-03-30 15:33:42 +02002118 tasklet_init(&d40c->tasklet, dma_tasklet,
2119 (unsigned long) d40c);
2120
2121 list_add_tail(&d40c->chan.device_node,
2122 &dma->channels);
2123 }
2124}
2125
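/*
 * d40_dmaengine_init - register the three dma_device instances:
 * dma_slave (logical channels, DMA_SLAVE), dma_memcpy (logical
 * channels, DMA_MEMCPY) and dma_both (physical channels, both
 * capabilities).
 */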
2126static int __init d40_dmaengine_init(struct d40_base *base,
2127 int num_reserved_chans)
2128{
 2129	int err;
2130
2131 d40_chan_init(base, &base->dma_slave, base->log_chans,
2132 0, base->num_log_chans);
2133
2134 dma_cap_zero(base->dma_slave.cap_mask);
2135 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2136
2137 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2138 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2139 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2140 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2141 base->dma_slave.device_tx_status = d40_tx_status;
2142 base->dma_slave.device_issue_pending = d40_issue_pending;
2143 base->dma_slave.device_control = d40_control;
2144 base->dma_slave.dev = base->dev;
2145
2146 err = dma_async_device_register(&base->dma_slave);
2147
2148 if (err) {
2149 dev_err(base->dev,
2150 "[%s] Failed to register slave channels\n",
2151 __func__);
2152 goto failure1;
2153 }
2154
2155 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2156 base->num_log_chans, base->plat_data->memcpy_len);
2157
2158 dma_cap_zero(base->dma_memcpy.cap_mask);
2159 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2160
2161 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2162 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2163 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2164 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2165 base->dma_memcpy.device_tx_status = d40_tx_status;
2166 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2167 base->dma_memcpy.device_control = d40_control;
2168 base->dma_memcpy.dev = base->dev;
2169 /*
 2170	 * This controller can only access addresses at even
 2171	 * 32-bit boundaries, i.e. 2^2 byte alignment
2172 */
2173 base->dma_memcpy.copy_align = 2;
2174
2175 err = dma_async_device_register(&base->dma_memcpy);
2176
2177 if (err) {
2178 dev_err(base->dev,
 2179			"[%s] Failed to register memcpy-only channels\n",
2180 __func__);
2181 goto failure2;
2182 }
2183
2184 d40_chan_init(base, &base->dma_both, base->phy_chans,
2185 0, num_reserved_chans);
2186
2187 dma_cap_zero(base->dma_both.cap_mask);
2188 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2189 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2190
2191 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2192 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2193 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2194 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2195 base->dma_both.device_tx_status = d40_tx_status;
2196 base->dma_both.device_issue_pending = d40_issue_pending;
2197 base->dma_both.device_control = d40_control;
2198 base->dma_both.dev = base->dev;
2199 base->dma_both.copy_align = 2;
2200 err = dma_async_device_register(&base->dma_both);
2201
2202 if (err) {
2203 dev_err(base->dev,
2204 "[%s] Failed to register logical and physical capable channels\n",
2205 __func__);
2206 goto failure3;
2207 }
2208 return 0;
2209failure3:
2210 dma_async_device_unregister(&base->dma_memcpy);
2211failure2:
2212 dma_async_device_unregister(&base->dma_slave);
2213failure1:
2214 return err;
2215}
2216
 2217/* Hardware detection and initialization functions. */
2218
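/*
 * d40_phy_res_init - initialize the physical channel resources.
 *
 * Channels reserved for secure mode are marked as permanently
 * allocated, all others as free. Returns the number of channels
 * available to this driver.
 */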
2219static int __init d40_phy_res_init(struct d40_base *base)
2220{
2221 int i;
2222 int num_phy_chans_avail = 0;
2223 u32 val[2];
2224 int odd_even_bit = -2;
2225
2226 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2227 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2228
2229 for (i = 0; i < base->num_phy_chans; i++) {
2230 base->phy_res[i].num = i;
2231 odd_even_bit += 2 * ((i % 2) == 0);
2232 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2233 /* Mark security only channels as occupied */
2234 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2235 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2236 } else {
2237 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2238 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2239 num_phy_chans_avail++;
2240 }
2241 spin_lock_init(&base->phy_res[i].lock);
2242 }
2243 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2244 num_phy_chans_avail, base->num_phy_chans);
2245
 2246	/* Verify channel mode settings: extended vs standard */
2247 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2248
2249 for (i = 0; i < base->num_phy_chans; i++) {
2250
2251 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2252 (val[0] & 0x3) != 1)
2253 dev_info(base->dev,
2254 "[%s] INFO: channel %d is misconfigured (%d)\n",
2255 __func__, i, val[0] & 0x3);
2256
2257 val[0] = val[0] >> 2;
2258 }
2259
2260 return num_phy_chans_avail;
2261}
2262
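/*
 * d40_hw_detect_init - detect the DMAC and allocate the driver base.
 *
 * Verifies the peripheral and PCell IDs, reads the number of physical
 * channels from the hardware and allocates struct d40_base together
 * with the channel, lookup and LCLA allocation tables.
 */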
2263static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2264{
2265 static const struct d40_reg_val dma_id_regs[] = {
2266 /* Peripheral Id */
2267 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2268 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2269 /*
2270 * D40_DREG_PERIPHID2 Depends on HW revision:
2271 * MOP500/HREF ED has 0x0008,
2272 * ? has 0x0018,
2273 * HREF V1 has 0x0028
2274 */
2275 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2276
2277 /* PCell Id */
2278 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2279 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2280 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2281 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2282 };
2283 struct stedma40_platform_data *plat_data;
2284 struct clk *clk = NULL;
2285 void __iomem *virtbase = NULL;
2286 struct resource *res = NULL;
2287 struct d40_base *base = NULL;
2288 int num_log_chans = 0;
2289 int num_phy_chans;
2290 int i;
2291
2292 clk = clk_get(&pdev->dev, NULL);
2293
2294 if (IS_ERR(clk)) {
2295 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2296 __func__);
2297 goto failure;
2298 }
2299
2300 clk_enable(clk);
2301
2302 /* Get IO for DMAC base address */
2303 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2304 if (!res)
2305 goto failure;
2306
2307 if (request_mem_region(res->start, resource_size(res),
2308 D40_NAME " I/O base") == NULL)
2309 goto failure;
2310
2311 virtbase = ioremap(res->start, resource_size(res));
2312 if (!virtbase)
2313 goto failure;
2314
2315 /* HW version check */
2316 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2317 if (dma_id_regs[i].val !=
2318 readl(virtbase + dma_id_regs[i].reg)) {
2319 dev_err(&pdev->dev,
2320 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2321 __func__,
2322 dma_id_regs[i].val,
2323 dma_id_regs[i].reg,
2324 readl(virtbase + dma_id_regs[i].reg));
2325 goto failure;
2326 }
2327 }
2328
2329 i = readl(virtbase + D40_DREG_PERIPHID2);
2330
2331 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2332 dev_err(&pdev->dev,
2333 "[%s] Unknown designer! Got %x wanted %x\n",
2334 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2335 goto failure;
2336 }
2337
2338 /* The number of physical channels on this HW */
2339 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2340
2341 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2342 (i >> 4) & 0xf, res->start);
2343
2344 plat_data = pdev->dev.platform_data;
2345
2346 /* Count the number of logical channels in use */
2347 for (i = 0; i < plat_data->dev_len; i++)
2348 if (plat_data->dev_rx[i] != 0)
2349 num_log_chans++;
2350
2351 for (i = 0; i < plat_data->dev_len; i++)
2352 if (plat_data->dev_tx[i] != 0)
2353 num_log_chans++;
2354
2355 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2356 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2357 sizeof(struct d40_chan), GFP_KERNEL);
2358
2359 if (base == NULL) {
2360 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2361 goto failure;
2362 }
2363
2364 base->clk = clk;
2365 base->num_phy_chans = num_phy_chans;
2366 base->num_log_chans = num_log_chans;
2367 base->phy_start = res->start;
2368 base->phy_size = resource_size(res);
2369 base->virtbase = virtbase;
2370 base->plat_data = plat_data;
2371 base->dev = &pdev->dev;
2372 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2373 base->log_chans = &base->phy_chans[num_phy_chans];
2374
2375 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2376 GFP_KERNEL);
2377 if (!base->phy_res)
2378 goto failure;
2379
2380 base->lookup_phy_chans = kzalloc(num_phy_chans *
2381 sizeof(struct d40_chan *),
2382 GFP_KERNEL);
2383 if (!base->lookup_phy_chans)
2384 goto failure;
2385
2386 if (num_log_chans + plat_data->memcpy_len) {
2387 /*
 2388		 * The max number of logical channels is the number of event
 2389		 * lines for all src devices and dst devices
2390 */
2391 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2392 sizeof(struct d40_chan *),
2393 GFP_KERNEL);
2394 if (!base->lookup_log_chans)
2395 goto failure;
2396 }
2397 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2398 GFP_KERNEL);
2399 if (!base->lcla_pool.alloc_map)
2400 goto failure;
2401
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002402 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2403 0, SLAB_HWCACHE_ALIGN,
2404 NULL);
2405 if (base->desc_slab == NULL)
2406 goto failure;
2407
Linus Walleij8d318a52010-03-30 15:33:42 +02002408 return base;
2409
2410failure:
 2411	if (!IS_ERR(clk)) {
2412 clk_disable(clk);
2413 clk_put(clk);
2414 }
2415 if (virtbase)
2416 iounmap(virtbase);
2417 if (res)
2418 release_mem_region(res->start,
2419 resource_size(res));
2422
2423 if (base) {
2424 kfree(base->lcla_pool.alloc_map);
2425 kfree(base->lookup_log_chans);
2426 kfree(base->lookup_phy_chans);
2427 kfree(base->phy_res);
2428 kfree(base);
2429 }
2430
2431 return NULL;
2432}
2433
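/*
 * d40_hw_init - write the initial hardware configuration.
 *
 * Enables the block clocks and the logical channel interrupts, then
 * sets every channel not reserved for secure use to physical mode and
 * enables and clears its interrupt.
 */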
2434static void __init d40_hw_init(struct d40_base *base)
2435{
2436
2437 static const struct d40_reg_val dma_init_reg[] = {
2438 /* Clock every part of the DMA block from start */
2439 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2440
2441 /* Interrupts on all logical channels */
2442 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2443 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2444 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2445 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2446 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2447 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2448 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2449 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2450 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2451 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2452 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2453 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2454 };
2455 int i;
2456 u32 prmseo[2] = {0, 0};
2457 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2458 u32 pcmis = 0;
2459 u32 pcicr = 0;
2460
2461 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2462 writel(dma_init_reg[i].val,
2463 base->virtbase + dma_init_reg[i].reg);
2464
2465 /* Configure all our dma channels to default settings */
2466 for (i = 0; i < base->num_phy_chans; i++) {
2467
2468 activeo[i % 2] = activeo[i % 2] << 2;
2469
2470 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2471 == D40_ALLOC_PHY) {
2472 activeo[i % 2] |= 3;
2473 continue;
2474 }
2475
2476 /* Enable interrupt # */
2477 pcmis = (pcmis << 1) | 1;
2478
2479 /* Clear interrupt # */
2480 pcicr = (pcicr << 1) | 1;
2481
2482 /* Set channel to physical mode */
2483 prmseo[i % 2] = prmseo[i % 2] << 2;
2484 prmseo[i % 2] |= 1;
2485
2486 }
2487
2488 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2489 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2490 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2491 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2492
2493 /* Write which interrupt to enable */
2494 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2495
2496 /* Write which interrupt to clear */
2497 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2498
2499}
2500
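/*
 * d40_probe - platform driver probe.
 *
 * Detects the hardware, claims and maps the LCPA and LCLA regions,
 * requests the interrupt, registers the dmaengine devices and finally
 * writes the initial hardware configuration.
 */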
2501static int __init d40_probe(struct platform_device *pdev)
2502{
2503 int err;
2504 int ret = -ENOENT;
2505 struct d40_base *base;
2506 struct resource *res = NULL;
2507 int num_reserved_chans;
2508 u32 val;
2509
2510 base = d40_hw_detect_init(pdev);
2511
2512 if (!base)
2513 goto failure;
2514
2515 num_reserved_chans = d40_phy_res_init(base);
2516
2517 platform_set_drvdata(pdev, base);
2518
2519 spin_lock_init(&base->interrupt_lock);
2520 spin_lock_init(&base->execmd_lock);
2521
2522 /* Get IO for logical channel parameter address */
2523 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2524 if (!res) {
2525 ret = -ENOENT;
2526 dev_err(&pdev->dev,
2527 "[%s] No \"lcpa\" memory resource\n",
2528 __func__);
2529 goto failure;
2530 }
2531 base->lcpa_size = resource_size(res);
2532 base->phy_lcpa = res->start;
2533
2534 if (request_mem_region(res->start, resource_size(res),
2535 D40_NAME " I/O lcpa") == NULL) {
2536 ret = -EBUSY;
2537 dev_err(&pdev->dev,
2538 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2539 __func__, res->start, res->end);
2540 goto failure;
2541 }
2542
 2543	/* The LCPA is in ESRAM; program it unless the HW already points elsewhere */
2544 val = readl(base->virtbase + D40_DREG_LCPA);
2545 if (res->start != val && val != 0) {
2546 dev_warn(&pdev->dev,
2547 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2548 __func__, val, res->start);
2549 } else
2550 writel(res->start, base->virtbase + D40_DREG_LCPA);
2551
2552 base->lcpa_base = ioremap(res->start, resource_size(res));
2553 if (!base->lcpa_base) {
2554 ret = -ENOMEM;
2555 dev_err(&pdev->dev,
2556 "[%s] Failed to ioremap LCPA region\n",
2557 __func__);
2558 goto failure;
2559 }
2560 /* Get IO for logical channel link address */
2561 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2562 if (!res) {
2563 ret = -ENOENT;
2564 dev_err(&pdev->dev,
2565 "[%s] No \"lcla\" resource defined\n",
2566 __func__);
2567 goto failure;
2568 }
2569
2570 base->lcla_pool.base_size = resource_size(res);
2571 base->lcla_pool.phy = res->start;
2572
2573 if (request_mem_region(res->start, resource_size(res),
2574 D40_NAME " I/O lcla") == NULL) {
2575 ret = -EBUSY;
2576 dev_err(&pdev->dev,
2577 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2578 __func__, res->start, res->end);
2579 goto failure;
2580 }
2581 val = readl(base->virtbase + D40_DREG_LCLA);
2582 if (res->start != val && val != 0) {
2583 dev_warn(&pdev->dev,
2584 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2585 __func__, val, res->start);
2586 } else
2587 writel(res->start, base->virtbase + D40_DREG_LCLA);
2588
2589 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2590 if (!base->lcla_pool.base) {
2591 ret = -ENOMEM;
2592 dev_err(&pdev->dev,
2593 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2594 __func__, res->start, res->end);
2595 goto failure;
2596 }
2597
2598 spin_lock_init(&base->lcla_pool.lock);
2599
2600 base->lcla_pool.num_blocks = base->num_phy_chans;
2601
2602 base->irq = platform_get_irq(pdev, 0);
2603
2604 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2605
2606 if (ret) {
 2607		dev_err(&pdev->dev, "[%s] Failed to request IRQ\n", __func__);
2608 goto failure;
2609 }
2610
2611 err = d40_dmaengine_init(base, num_reserved_chans);
2612 if (err)
2613 goto failure;
2614
2615 d40_hw_init(base);
2616
2617 dev_info(base->dev, "initialized\n");
2618 return 0;
2619
2620failure:
2621 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002622 if (base->desc_slab)
2623 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002624 if (base->virtbase)
2625 iounmap(base->virtbase);
2626 if (base->lcla_pool.phy)
2627 release_mem_region(base->lcla_pool.phy,
2628 base->lcla_pool.base_size);
2629 if (base->phy_lcpa)
2630 release_mem_region(base->phy_lcpa,
2631 base->lcpa_size);
2632 if (base->phy_start)
2633 release_mem_region(base->phy_start,
2634 base->phy_size);
2635 if (base->clk) {
2636 clk_disable(base->clk);
2637 clk_put(base->clk);
2638 }
2639
2640 kfree(base->lcla_pool.alloc_map);
2641 kfree(base->lookup_log_chans);
2642 kfree(base->lookup_phy_chans);
2643 kfree(base->phy_res);
2644 kfree(base);
2645 }
2646
2647 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2648 return ret;
2649}
2650
2651static struct platform_driver d40_driver = {
2652 .driver = {
2653 .owner = THIS_MODULE,
2654 .name = D40_NAME,
2655 },
2656};
2657
2658int __init stedma40_init(void)
2659{
2660 return platform_driver_probe(&d40_driver, d40_probe);
2661}
2662arch_initcall(stedma40_init);