/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
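
/*
 * Worked example (illustrative): two physical channels share each
 * command/status register, 2 bits per channel. For channel 5,
 * D40_CHAN_POS(5) = 2 * (5 / 2) = 4 and D40_CHAN_POS_MASK(5) = 0x30,
 * i.e. bits [5:4]. Channel 4 resolves to the same bit position but is
 * reached through the even register of the pair (see ACTIVE vs ACTIVO
 * in d40_channel_execute_command() below).
 */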

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer, there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_count;
	u32 lli_tx_len;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	enum dma_data_direction dir;
	bool is_in_client_list;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA.
 * @phy: Physical base address of LCLA.
 * @base_size: The size of the LCLA area.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equal to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void *base;
	dma_addr_t phy;
	resource_size_t base_size;
	spinlock_t lock;
	u32 *alloc_map;
	int num_blocks;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line numbers. Both allocated_src and allocated_dst cannot be
 * allocated to a physical channel, since the interrupt handler then has
 * no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
};
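
/*
 * Illustrative allocation states (derived from the D40_ALLOC* defines):
 * a fully free half channel reads D40_ALLOC_FREE (bit 31 set); a physical
 * claim sets both fields to D40_ALLOC_PHY (bit 30); a first logical claim
 * clears the field to D40_ALLOC_LOG_FREE (0) and then sets one bit per
 * event line, e.g. src event lines 2 and 5 give allocated_src == 0x24.
 */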

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst/src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	/* ID of the most recent completed transfer */
	int completed;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head active;
	struct list_head queue;
	struct stedma40_chan_cfg dma_cfg;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_lcla_elem lcla;
	struct d40_log_lli_full *lcpa;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register map.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_phy_chans;
	int num_log_chans;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}
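
/*
 * Worked example (illustrative): a physical transfer with lli_len == 4
 * gets lli_pool.size = ALIGN(4 * 2 * sizeof(struct d40_phy_lli), align),
 * i.e. room for 4 src plus 4 dst LLIs, and one extra alignment's worth is
 * kmalloc'd so that PTR_ALIGN() can shift the arrays up without running
 * past the buffer. The single-LLI case reuses the embedded pre_alloc_lli[]
 * scratch area and leaves lli_pool.base == NULL, which makes the kfree()
 * in d40_pool_lli_free() a harmless no-op.
 */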

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}

static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
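
/*
 * Note (illustrative): dma_cookie_t is a signed integer, so when the
 * channel cookie reaches INT_MAX the pre-increment above wraps negative
 * and the counter restarts at 1. Zero and negative cookies are reserved
 * by the dmaengine framework for "not started" and error states, so they
 * are never handed out here.
 */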

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Prefer reusing a client owned descriptor that has been acked */
	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				return d;
			}
	}

	d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
	if (d != NULL) {
		memset(d, 0, sizeof(struct d40_desc));
		INIT_LIST_HEAD(&d->node);
	}
	return d;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c,
			   struct d40_lcla_pool *pool)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		pool->base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;
	unsigned long flags;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (pool->num_blocks > 32)
		return -EINVAL;

	spin_lock_irqsave(&pool->lock, flags);

	for (i = 0; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= pool->num_blocks)
		goto err;

	for (; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&pool->lock, flags);
	return -EINVAL;
}

static void d40_lcla_id_put(struct d40_chan *d40c,
			    struct d40_lcla_pool *pool,
			    int id)
{
	unsigned long flags;

	if (id < 0)
		return;

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_lock_irqsave(&pool->lock, flags);
	pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
	spin_unlock_irqrestore(&pool->lock, flags);
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {
		for (i = 0; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
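
/*
 * Worked example (illustrative): suspending physical channel 3 writes
 * D40_DMA_SUSPEND_REQ (2) to bits [3:2] of ACTIVO (odd channel), then
 * polls that field until it reads D40_DMA_SUSPENDED (3) or D40_DMA_STOP
 * (0), giving up after D40_SUSPEND_MAX_IT * 3 us, i.e. roughly 1.5 ms.
 */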

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.src_id);
	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.dst_id);

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Note that disabling requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val = 0;

	/* If SSLNK or SDLNK is zero all events are disabled */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SDLNK);
	return val;
}

static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}

static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;

		src += d40d->lli_count;
		dst += d40d->lli_count;
		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
				  d40c->lcla.dst,
				  dst, src,
				  d40c->base->plat_data->llis_per_log);
	}
	d40d->lli_count += d40d->lli_tx_len;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_count < d40d->lli_len) {
		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from being returned
	 * to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
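
/*
 * Worked example (illustrative, 32-bit longs assumed): a transfer-complete
 * interrupt from logical channel 70 sets bit 6 of LCTIS2, so the
 * find_next_bit() scan yields chan = 2 * 32 + 6 = 70, hence row = 2 and
 * idx = 6. il[2].offset is 64, so bit 6 of LCICR2 is acked and the event
 * is dispatched to lookup_log_chans[64 + 6].
 */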

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0, is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical
		 * channels rather than packing every logical channel onto
		 * the first few available ones.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event, dir;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		dir = D40_CHAN_REG_SDLNK;
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		dir = D40_CHAN_REG_SSLNK;
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/*
		 * Release logical channel, deactivate the event line while
		 * the physical resource is suspended.
		 */
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
		       D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       phy->num * D40_DREG_PCDELTA + dir);

		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else
		d40_alloc_mask_free(phy, is_src, 0);

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
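
/*
 * Worked example (illustrative): ECNT holds the number of elements still
 * to be transferred and data_width is log2 of the element size in bytes,
 * so 16 outstanding 32-bit elements (data_width == 2) give a residue of
 * 16 * (1 << 2) = 64 bytes.
 */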

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);
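
/*
 * Minimal usage sketch (hypothetical client code, not part of this
 * driver; the STEDMA40_PSIZE_LOG_* value is assumed from the platform
 * header): after requesting a channel, a client could tune the burst
 * sizes with e.g.
 *
 *	stedma40_set_psize(chan, STEDMA40_PSIZE_LOG_4,
 *			   STEDMA40_PSIZE_LOG_4);
 *
 * on a logical channel (STEDMA40_PSIZE_PHY_* on a physical one). Only the
 * cached default CFG/LCSP values are updated, so the call must precede
 * descriptor preparation to take effect.
 */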

struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = d40d->lli_len;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

		if (sgl_len > 1)
			/*
			 * Check if there is space available in lcla. If not,
			 * split list into 1-length and run only in lcpa
			 * space.
			 */
			if (d40_lcla_id_get(d40c,
					    &d40c->base->lcla_pool) != 0)
				d40d->lli_tx_len = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);
	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);

		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);

		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);
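
/*
 * Minimal usage sketch (hypothetical client code, not part of this
 * driver; my_done_cb and my_ctx are placeholders):
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = stedma40_memcpy_sg(chan, dst_sgl, src_sgl, sgl_len,
 *				 DMA_PREP_INTERRUPT);
 *	if (!IS_ERR_OR_NULL(txd)) {
 *		txd->callback = my_done_cb;
 *		txd->callback_param = my_ctx;
 *		cookie = txd->tx_submit(txd);
 *	}
 *
 * Both scatterlists must hold sgl_len entries. tx_submit() only queues
 * the job; it is started later through the dmaengine issue_pending path.
 */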

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
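
/*
 * Minimal usage sketch (hypothetical client code, not part of this
 * driver; the device type and channel_type composition are platform
 * specific assumptions):
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = <platform specific device type>,
 *		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
 *		.channel_type = STEDMA40_CHANNEL_IN_LOG_MODE,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * Passing NULL instead of &cfg makes the filter fall back to the default
 * memcpy configuration via d40_config_memcpy().
 */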

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration (memcpy)
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure memcpy channel\n",
				__func__);
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy) {
		err = d40_config_write(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure channel\n",
				__func__);
		}
	}
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot free unallocated channel\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err = 0;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;
		d40d->lli_tx_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 true, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);
	} else {
		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);

		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
1820
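/*
 * Build the logical-channel LLI list for a slave scatter/gather job.
 * lli_tx_len is capped at llis_per_log and falls back to one LLI per
 * transfer when no LCLA entry can be reserved, so that the job can run
 * from LCPA space alone.
 */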
1821static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1822 struct d40_chan *d40c,
1823 struct scatterlist *sgl,
1824 unsigned int sg_len,
1825 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001826 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001827{
1828 dma_addr_t dev_addr = 0;
1829 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001830
1831 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1832 dev_err(&d40c->chan.dev->device,
1833 "[%s] Out of memory\n", __func__);
1834 return -ENOMEM;
1835 }
1836
1837 d40d->lli_len = sg_len;
Per Friden941b77a2010-06-20 21:24:45 +00001838 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1839 d40d->lli_tx_len = d40d->lli_len;
1840 else
1841 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
Linus Walleij8d318a52010-03-30 15:33:42 +02001842
1843 if (sg_len > 1)
1844 /*
1845 * Check if there is space available in lcla.
1846		 * If not, split the list into one-LLI transfers
1847		 * and run only in lcpa space.
1848 */
1849 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001850 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001851
Jonas Aaberg2a614342010-06-20 21:25:24 +00001852 if (direction == DMA_FROM_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001853 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001854 else if (direction == DMA_TO_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001855 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001856 else
Linus Walleij8d318a52010-03-30 15:33:42 +02001857 return -EINVAL;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001858
1859 total_size = d40_log_sg_to_dev(&d40c->lcla,
1860 sgl, sg_len,
1861 &d40d->lli_log,
1862 &d40c->log_def,
1863 d40c->dma_cfg.src_info.data_width,
1864 d40c->dma_cfg.dst_info.data_width,
1865 direction,
1866 dma_flags & DMA_PREP_INTERRUPT,
1867 dev_addr, d40d->lli_tx_len,
1868 d40c->base->plat_data->llis_per_log);
1869
Linus Walleij8d318a52010-03-30 15:33:42 +02001870 if (total_size < 0)
1871 return -EINVAL;
1872
1873 return 0;
1874}
1875
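/*
 * Build the physical-channel LLI lists for a slave scatter/gather job:
 * one list for the source side and one for the destination side, with
 * the fixed device address on the appropriate end, then map the pool
 * so the DMAC can fetch it.
 */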
1876static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1877 struct d40_chan *d40c,
1878 struct scatterlist *sgl,
1879 unsigned int sgl_len,
1880 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001881 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001882{
1883 dma_addr_t src_dev_addr;
1884 dma_addr_t dst_dev_addr;
1885 int res;
1886
1887 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1888 dev_err(&d40c->chan.dev->device,
1889 "[%s] Out of memory\n", __func__);
1890 return -ENOMEM;
1891 }
1892
1893 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001894 d40d->lli_tx_len = sgl_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02001895
1896 if (direction == DMA_FROM_DEVICE) {
1897 dst_dev_addr = 0;
1898 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1899 } else if (direction == DMA_TO_DEVICE) {
1900 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1901 src_dev_addr = 0;
1902 } else
1903 return -EINVAL;
1904
1905 res = d40_phy_sg_to_lli(sgl,
1906 sgl_len,
1907 src_dev_addr,
1908 d40d->lli_phy.src,
1909 d40d->lli_phy.src_addr,
1910 d40c->src_def_cfg,
1911 d40c->dma_cfg.src_info.data_width,
1912 d40c->dma_cfg.src_info.psize,
1913 true);
1914 if (res < 0)
1915 return res;
1916
1917 res = d40_phy_sg_to_lli(sgl,
1918 sgl_len,
1919 dst_dev_addr,
1920 d40d->lli_phy.dst,
1921 d40d->lli_phy.dst_addr,
1922 d40c->dst_def_cfg,
1923 d40c->dma_cfg.dst_info.data_width,
1924 d40c->dma_cfg.dst_info.psize,
1925 true);
1926 if (res < 0)
1927 return res;
1928
1929 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1930 d40d->lli_pool.size, DMA_TO_DEVICE);
1931 return 0;
1932}
1933
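/*
 * dmaengine device_prep_slave_sg() callback. Runs the optional
 * pre_transfer hook, grabs a descriptor and dispatches to the logical
 * or physical variant depending on how the channel was allocated.
 */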
1934static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1935 struct scatterlist *sgl,
1936 unsigned int sg_len,
1937 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001938 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001939{
1940 struct d40_desc *d40d;
1941 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1942 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001943 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001944 int err;
1945
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001946 if (d40c->phy_chan == NULL) {
1947 dev_err(&d40c->chan.dev->device,
1948 "[%s] Cannot prepare unallocated channel\n", __func__);
1949 return ERR_PTR(-EINVAL);
1950 }
1951
Linus Walleij8d318a52010-03-30 15:33:42 +02001952 if (d40c->dma_cfg.pre_transfer)
1953 d40c->dma_cfg.pre_transfer(chan,
1954 d40c->dma_cfg.pre_transfer_data,
1955 sg_dma_len(sgl));
1956
Jonas Aaberg2a614342010-06-20 21:25:24 +00001957 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001958 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001959 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001960
1961 if (d40d == NULL)
1962 return NULL;
1963
Linus Walleij8d318a52010-03-30 15:33:42 +02001964 if (d40c->log_num != D40_PHY_CHAN)
1965 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001966 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001967 else
1968 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001969 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001970 if (err) {
1971 dev_err(&d40c->chan.dev->device,
1972 "[%s] Failed to prepare %s slave sg job: %d\n",
1973 __func__,
1974 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
1975 return NULL;
1976 }
1977
Jonas Aaberg2a614342010-06-20 21:25:24 +00001978 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001979
1980 dma_async_tx_descriptor_init(&d40d->txd, chan);
1981
1982 d40d->txd.tx_submit = d40_tx_submit;
1983
1984 return &d40d->txd;
1985}
1986
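/*
 * dmaengine device_tx_status() callback: reports DMA_PAUSED for a
 * paused channel, otherwise the usual cookie comparison, and fills in
 * the residue via stedma40_residue().
 */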
1987static enum dma_status d40_tx_status(struct dma_chan *chan,
1988 dma_cookie_t cookie,
1989 struct dma_tx_state *txstate)
1990{
1991 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1992 dma_cookie_t last_used;
1993 dma_cookie_t last_complete;
1994 int ret;
1995
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001996 if (d40c->phy_chan == NULL) {
1997 dev_err(&d40c->chan.dev->device,
1998 "[%s] Cannot read status of unallocated channel\n",
1999 __func__);
2000 return -EINVAL;
2001 }
2002
Linus Walleij8d318a52010-03-30 15:33:42 +02002003 last_complete = d40c->completed;
2004 last_used = chan->cookie;
2005
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002006 if (d40_is_paused(d40c))
2007 ret = DMA_PAUSED;
2008 else
2009 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002010
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002011 dma_set_tx_state(txstate, last_complete, last_used,
2012 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002013
2014 return ret;
2015}
2016
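/*
 * dmaengine device_issue_pending() callback: starts the queued jobs
 * unless the channel is already busy processing them.
 */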
2017static void d40_issue_pending(struct dma_chan *chan)
2018{
2019 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2020 unsigned long flags;
2021
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002022 if (d40c->phy_chan == NULL) {
2023 dev_err(&d40c->chan.dev->device,
2024 "[%s] Channel is not allocated!\n", __func__);
2025 return;
2026 }
2027
Linus Walleij8d318a52010-03-30 15:33:42 +02002028 spin_lock_irqsave(&d40c->lock, flags);
2029
2030 /* Busy means that pending jobs are already being processed */
2031 if (!d40c->busy)
2032 (void) d40_queue_start(d40c);
2033
2034 spin_unlock_irqrestore(&d40c->lock, flags);
2035}
2036
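/*
 * dmaengine device_control() callback; only DMA_TERMINATE_ALL,
 * DMA_PAUSE and DMA_RESUME are implemented.
 */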
Linus Walleij05827632010-05-17 16:30:42 -07002037static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2038 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002039{
2040 unsigned long flags;
2041 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2042
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002043 if (d40c->phy_chan == NULL) {
2044 dev_err(&d40c->chan.dev->device,
2045 "[%s] Channel is not allocated!\n", __func__);
2046 return -EINVAL;
2047 }
2048
Linus Walleij8d318a52010-03-30 15:33:42 +02002049 switch (cmd) {
2050 case DMA_TERMINATE_ALL:
2051 spin_lock_irqsave(&d40c->lock, flags);
2052 d40_term_all(d40c);
2053 spin_unlock_irqrestore(&d40c->lock, flags);
2054 return 0;
2055 case DMA_PAUSE:
2056 return d40_pause(chan);
2057 case DMA_RESUME:
2058 return d40_resume(chan);
2059 }
2060
2061 /* Other commands are unimplemented */
2062 return -ENXIO;
2063}
2064
2065/* Initialization functions */
2066
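/*
 * Initialize a range of channel structures and hook them onto the
 * given dma_device's channel list.
 */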
2067static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2068 struct d40_chan *chans, int offset,
2069 int num_chans)
2070{
2071 int i = 0;
2072 struct d40_chan *d40c;
2073
2074 INIT_LIST_HEAD(&dma->channels);
2075
2076 for (i = offset; i < offset + num_chans; i++) {
2077 d40c = &chans[i];
2078 d40c->base = base;
2079 d40c->chan.device = dma;
2080
2081 /* Invalidate lcla element */
2082 d40c->lcla.src_id = -1;
2083 d40c->lcla.dst_id = -1;
2084
2085 spin_lock_init(&d40c->lock);
2086
2087 d40c->log_num = D40_PHY_CHAN;
2088
Linus Walleij8d318a52010-03-30 15:33:42 +02002089 INIT_LIST_HEAD(&d40c->active);
2090 INIT_LIST_HEAD(&d40c->queue);
2091 INIT_LIST_HEAD(&d40c->client);
2092
Linus Walleij8d318a52010-03-30 15:33:42 +02002093 tasklet_init(&d40c->tasklet, dma_tasklet,
2094 (unsigned long) d40c);
2095
2096 list_add_tail(&d40c->chan.device_node,
2097 &dma->channels);
2098 }
2099}
2100
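/*
 * Register the three dma_device instances with the dmaengine core:
 * slave-only logical channels, memcpy-only logical channels, and the
 * reserved physical channels capable of both.
 */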
2101static int __init d40_dmaengine_init(struct d40_base *base,
2102 int num_reserved_chans)
2103{
2104	int err;
2105
2106 d40_chan_init(base, &base->dma_slave, base->log_chans,
2107 0, base->num_log_chans);
2108
2109 dma_cap_zero(base->dma_slave.cap_mask);
2110 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2111
2112 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2113 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2114 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2115 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2116 base->dma_slave.device_tx_status = d40_tx_status;
2117 base->dma_slave.device_issue_pending = d40_issue_pending;
2118 base->dma_slave.device_control = d40_control;
2119 base->dma_slave.dev = base->dev;
2120
2121 err = dma_async_device_register(&base->dma_slave);
2122
2123 if (err) {
2124 dev_err(base->dev,
2125 "[%s] Failed to register slave channels\n",
2126 __func__);
2127 goto failure1;
2128 }
2129
2130 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2131 base->num_log_chans, base->plat_data->memcpy_len);
2132
2133 dma_cap_zero(base->dma_memcpy.cap_mask);
2134 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2135
2136 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2137 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2138 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2139 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2140 base->dma_memcpy.device_tx_status = d40_tx_status;
2141 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2142 base->dma_memcpy.device_control = d40_control;
2143 base->dma_memcpy.dev = base->dev;
2144 /*
2145	 * This controller can only access addresses at even
2146	 * 32-bit boundaries, i.e. with an alignment of 2^2 bytes.
2147 */
2148 base->dma_memcpy.copy_align = 2;
2149
2150 err = dma_async_device_register(&base->dma_memcpy);
2151
2152 if (err) {
2153 dev_err(base->dev,
2154			"[%s] Failed to register memcpy-only channels\n",
2155 __func__);
2156 goto failure2;
2157 }
2158
2159 d40_chan_init(base, &base->dma_both, base->phy_chans,
2160 0, num_reserved_chans);
2161
2162 dma_cap_zero(base->dma_both.cap_mask);
2163 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2164 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2165
2166 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2167 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2168 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2169 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2170 base->dma_both.device_tx_status = d40_tx_status;
2171 base->dma_both.device_issue_pending = d40_issue_pending;
2172 base->dma_both.device_control = d40_control;
2173 base->dma_both.dev = base->dev;
2174 base->dma_both.copy_align = 2;
2175 err = dma_async_device_register(&base->dma_both);
2176
2177 if (err) {
2178 dev_err(base->dev,
2179 "[%s] Failed to register logical and physical capable channels\n",
2180 __func__);
2181 goto failure3;
2182 }
2183 return 0;
2184failure3:
2185 dma_async_device_unregister(&base->dma_memcpy);
2186failure2:
2187 dma_async_device_unregister(&base->dma_slave);
2188failure1:
2189 return err;
2190}
2191
2192/* Probe and hardware initialization functions. */
2193
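/*
 * Read the PRSME/PRSMO security registers and mark secure-mode-only
 * channels as permanently occupied, then sanity-check the channel mode
 * in PRTYP; returns the number of physical channels left for general
 * use.
 */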
2194static int __init d40_phy_res_init(struct d40_base *base)
2195{
2196 int i;
2197 int num_phy_chans_avail = 0;
2198 u32 val[2];
2199 int odd_even_bit = -2;
2200
2201 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2202 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2203
2204 for (i = 0; i < base->num_phy_chans; i++) {
2205 base->phy_res[i].num = i;
2206 odd_even_bit += 2 * ((i % 2) == 0);
2207 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2208 /* Mark security only channels as occupied */
2209 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2210 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2211 } else {
2212 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2213 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2214 num_phy_chans_avail++;
2215 }
2216 spin_lock_init(&base->phy_res[i].lock);
2217 }
2218 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2219 num_phy_chans_avail, base->num_phy_chans);
2220
2221 /* Verify settings extended vs standard */
2222 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2223
2224 for (i = 0; i < base->num_phy_chans; i++) {
2225
2226 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2227 (val[0] & 0x3) != 1)
2228 dev_info(base->dev,
2229 "[%s] INFO: channel %d is misconfigured (%d)\n",
2230 __func__, i, val[0] & 0x3);
2231
2232 val[0] = val[0] >> 2;
2233 }
2234
2235 return num_phy_chans_avail;
2236}
2237
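/*
 * Map and identify the DMAC: verify the Peripheral/PCell ID registers
 * and the designer field, read the number of physical channels from
 * ICFG, count the logical channels requested by platform data, and
 * allocate and fill struct d40_base with its lookup tables.
 */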
2238static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2239{
2240 static const struct d40_reg_val dma_id_regs[] = {
2241 /* Peripheral Id */
2242 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2243 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2244 /*
2245 * D40_DREG_PERIPHID2 Depends on HW revision:
2246 * MOP500/HREF ED has 0x0008,
2247 * ? has 0x0018,
2248 * HREF V1 has 0x0028
2249 */
2250 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2251
2252 /* PCell Id */
2253 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2254 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2255 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2256 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2257 };
2258 struct stedma40_platform_data *plat_data;
2259 struct clk *clk = NULL;
2260 void __iomem *virtbase = NULL;
2261 struct resource *res = NULL;
2262 struct d40_base *base = NULL;
2263 int num_log_chans = 0;
2264 int num_phy_chans;
2265 int i;
2266
2267 clk = clk_get(&pdev->dev, NULL);
2268
2269 if (IS_ERR(clk)) {
2270 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2271 __func__);
2272 goto failure;
2273 }
2274
2275 clk_enable(clk);
2276
2277 /* Get IO for DMAC base address */
2278 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2279 if (!res)
2280 goto failure;
2281
2282 if (request_mem_region(res->start, resource_size(res),
2283 D40_NAME " I/O base") == NULL)
2284 goto failure;
2285
2286 virtbase = ioremap(res->start, resource_size(res));
2287 if (!virtbase)
2288 goto failure;
2289
2290 /* HW version check */
2291 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2292 if (dma_id_regs[i].val !=
2293 readl(virtbase + dma_id_regs[i].reg)) {
2294 dev_err(&pdev->dev,
2295 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2296 __func__,
2297 dma_id_regs[i].val,
2298 dma_id_regs[i].reg,
2299 readl(virtbase + dma_id_regs[i].reg));
2300 goto failure;
2301 }
2302 }
2303
2304 i = readl(virtbase + D40_DREG_PERIPHID2);
2305
2306 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2307 dev_err(&pdev->dev,
2308 "[%s] Unknown designer! Got %x wanted %x\n",
2309 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2310 goto failure;
2311 }
2312
2313 /* The number of physical channels on this HW */
2314 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2315
2316 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2317 (i >> 4) & 0xf, res->start);
2318
2319 plat_data = pdev->dev.platform_data;
2320
2321 /* Count the number of logical channels in use */
2322 for (i = 0; i < plat_data->dev_len; i++)
2323 if (plat_data->dev_rx[i] != 0)
2324 num_log_chans++;
2325
2326 for (i = 0; i < plat_data->dev_len; i++)
2327 if (plat_data->dev_tx[i] != 0)
2328 num_log_chans++;
2329
2330 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2331 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2332 sizeof(struct d40_chan), GFP_KERNEL);
2333
2334 if (base == NULL) {
2335 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2336 goto failure;
2337 }
2338
2339 base->clk = clk;
2340 base->num_phy_chans = num_phy_chans;
2341 base->num_log_chans = num_log_chans;
2342 base->phy_start = res->start;
2343 base->phy_size = resource_size(res);
2344 base->virtbase = virtbase;
2345 base->plat_data = plat_data;
2346 base->dev = &pdev->dev;
2347 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2348 base->log_chans = &base->phy_chans[num_phy_chans];
2349
2350 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2351 GFP_KERNEL);
2352 if (!base->phy_res)
2353 goto failure;
2354
2355 base->lookup_phy_chans = kzalloc(num_phy_chans *
2356 sizeof(struct d40_chan *),
2357 GFP_KERNEL);
2358 if (!base->lookup_phy_chans)
2359 goto failure;
2360
2361 if (num_log_chans + plat_data->memcpy_len) {
2362 /*
2363		 * The maximum number of logical channels is the number of
2364		 * event lines for all src and dst devices combined.
2365 */
2366 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2367 sizeof(struct d40_chan *),
2368 GFP_KERNEL);
2369 if (!base->lookup_log_chans)
2370 goto failure;
2371 }
2372 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2373 GFP_KERNEL);
2374 if (!base->lcla_pool.alloc_map)
2375 goto failure;
2376
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002377 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2378 0, SLAB_HWCACHE_ALIGN,
2379 NULL);
2380 if (base->desc_slab == NULL)
2381 goto failure;
2382
Linus Walleij8d318a52010-03-30 15:33:42 +02002383 return base;
2384
2385failure:
2386 if (clk) {
2387 clk_disable(clk);
2388 clk_put(clk);
2389 }
2390 if (virtbase)
2391 iounmap(virtbase);
2392 if (res)
2393 release_mem_region(res->start,
2394 resource_size(res));
2397
2398 if (base) {
2399 kfree(base->lcla_pool.alloc_map);
2400 kfree(base->lookup_log_chans);
2401 kfree(base->lookup_phy_chans);
2402 kfree(base->phy_res);
2403 kfree(base);
2404 }
2405
2406 return NULL;
2407}
2408
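/*
 * Program the hardware defaults: clock the whole block, unmask and
 * clear the logical-channel interrupts, then set every available
 * channel to physical mode and enable/clear its physical-channel
 * interrupt, skipping the channels reserved for secure mode.
 */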
2409static void __init d40_hw_init(struct d40_base *base)
2410{
2411
2412 static const struct d40_reg_val dma_init_reg[] = {
2413 /* Clock every part of the DMA block from start */
2414 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2415
2416 /* Interrupts on all logical channels */
2417 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2418 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2419 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2420 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2421 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2422 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2423 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2424 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2425 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2426 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2427 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2428 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2429 };
2430 int i;
2431 u32 prmseo[2] = {0, 0};
2432 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2433 u32 pcmis = 0;
2434 u32 pcicr = 0;
2435
2436 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2437 writel(dma_init_reg[i].val,
2438 base->virtbase + dma_init_reg[i].reg);
2439
2440 /* Configure all our dma channels to default settings */
2441 for (i = 0; i < base->num_phy_chans; i++) {
2442
2443 activeo[i % 2] = activeo[i % 2] << 2;
2444
2445 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2446 == D40_ALLOC_PHY) {
2447 activeo[i % 2] |= 3;
2448 continue;
2449 }
2450
2451 /* Enable interrupt # */
2452 pcmis = (pcmis << 1) | 1;
2453
2454 /* Clear interrupt # */
2455 pcicr = (pcicr << 1) | 1;
2456
2457 /* Set channel to physical mode */
2458 prmseo[i % 2] = prmseo[i % 2] << 2;
2459 prmseo[i % 2] |= 1;
2460
2461 }
2462
2463 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2464 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2465 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2466 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2467
2468 /* Write which interrupt to enable */
2469 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2470
2471 /* Write which interrupt to clear */
2472 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2473
2474}
2475
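/*
 * Probe: detect the hardware, claim and map the LCPA and LCLA memory
 * regions (held in ESRAM), hook up the interrupt and register the
 * dma_device instances with the dmaengine core.
 */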
2476static int __init d40_probe(struct platform_device *pdev)
2477{
2478 int err;
2479 int ret = -ENOENT;
2480 struct d40_base *base;
2481 struct resource *res = NULL;
2482 int num_reserved_chans;
2483 u32 val;
2484
2485 base = d40_hw_detect_init(pdev);
2486
2487 if (!base)
2488 goto failure;
2489
2490 num_reserved_chans = d40_phy_res_init(base);
2491
2492 platform_set_drvdata(pdev, base);
2493
2494 spin_lock_init(&base->interrupt_lock);
2495 spin_lock_init(&base->execmd_lock);
2496
2497 /* Get IO for logical channel parameter address */
2498 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2499 if (!res) {
2500 ret = -ENOENT;
2501 dev_err(&pdev->dev,
2502 "[%s] No \"lcpa\" memory resource\n",
2503 __func__);
2504 goto failure;
2505 }
2506 base->lcpa_size = resource_size(res);
2507 base->phy_lcpa = res->start;
2508
2509 if (request_mem_region(res->start, resource_size(res),
2510 D40_NAME " I/O lcpa") == NULL) {
2511 ret = -EBUSY;
2512 dev_err(&pdev->dev,
2513 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2514 __func__, res->start, res->end);
2515 goto failure;
2516 }
2517
2518 /* We make use of ESRAM memory for this. */
2519 val = readl(base->virtbase + D40_DREG_LCPA);
2520 if (res->start != val && val != 0) {
2521 dev_warn(&pdev->dev,
2522 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2523 __func__, val, res->start);
2524 } else
2525 writel(res->start, base->virtbase + D40_DREG_LCPA);
2526
2527 base->lcpa_base = ioremap(res->start, resource_size(res));
2528 if (!base->lcpa_base) {
2529 ret = -ENOMEM;
2530 dev_err(&pdev->dev,
2531 "[%s] Failed to ioremap LCPA region\n",
2532 __func__);
2533 goto failure;
2534 }
2535 /* Get IO for logical channel link address */
2536 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2537 if (!res) {
2538 ret = -ENOENT;
2539 dev_err(&pdev->dev,
2540 "[%s] No \"lcla\" resource defined\n",
2541 __func__);
2542 goto failure;
2543 }
2544
2545 base->lcla_pool.base_size = resource_size(res);
2546 base->lcla_pool.phy = res->start;
2547
2548 if (request_mem_region(res->start, resource_size(res),
2549 D40_NAME " I/O lcla") == NULL) {
2550 ret = -EBUSY;
2551 dev_err(&pdev->dev,
2552 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2553 __func__, res->start, res->end);
2554 goto failure;
2555 }
2556 val = readl(base->virtbase + D40_DREG_LCLA);
2557 if (res->start != val && val != 0) {
2558 dev_warn(&pdev->dev,
2559 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2560 __func__, val, res->start);
2561 } else
2562 writel(res->start, base->virtbase + D40_DREG_LCLA);
2563
2564 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2565 if (!base->lcla_pool.base) {
2566 ret = -ENOMEM;
2567 dev_err(&pdev->dev,
2568 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2569 __func__, res->start, res->end);
2570 goto failure;
2571 }
2572
2573 spin_lock_init(&base->lcla_pool.lock);
2574
2575 base->lcla_pool.num_blocks = base->num_phy_chans;
2576
2577 base->irq = platform_get_irq(pdev, 0);
2578
2579 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2580
2581 if (ret) {
2582		dev_err(&pdev->dev, "[%s] Failed to request IRQ\n", __func__);
2583 goto failure;
2584 }
2585
2586 err = d40_dmaengine_init(base, num_reserved_chans);
2587 if (err)
2588 goto failure;
2589
2590 d40_hw_init(base);
2591
2592 dev_info(base->dev, "initialized\n");
2593 return 0;
2594
2595failure:
2596 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002597 if (base->desc_slab)
2598 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002599 if (base->virtbase)
2600 iounmap(base->virtbase);
2601 if (base->lcla_pool.phy)
2602 release_mem_region(base->lcla_pool.phy,
2603 base->lcla_pool.base_size);
2604 if (base->phy_lcpa)
2605 release_mem_region(base->phy_lcpa,
2606 base->lcpa_size);
2607 if (base->phy_start)
2608 release_mem_region(base->phy_start,
2609 base->phy_size);
2610 if (base->clk) {
2611 clk_disable(base->clk);
2612 clk_put(base->clk);
2613 }
2614
2615 kfree(base->lcla_pool.alloc_map);
2616 kfree(base->lookup_log_chans);
2617 kfree(base->lookup_phy_chans);
2618 kfree(base->phy_res);
2619 kfree(base);
2620 }
2621
2622 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2623 return ret;
2624}
2625
2626static struct platform_driver d40_driver = {
2627 .driver = {
2628 .owner = THIS_MODULE,
2629 .name = D40_NAME,
2630 },
2631};
2632
2633int __init stedma40_init(void)
2634{
2635 return platform_driver_probe(&d40_driver, d40_probe);
2636}
2637arch_initcall(stedma40_init);
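/*
 * Illustrative client usage (a minimal sketch, not part of this file):
 * a peripheral driver would obtain a channel through the generic
 * dmaengine API and end up in the callbacks registered above, roughly
 * as follows. The filter function and channel configuration shown here
 * are assumptions that depend on the platform data.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &my_chan_cfg);
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						   DMA_TO_DEVICE,
 *						   DMA_PREP_INTERRUPT);
 *	desc->callback = my_transfer_done;
 *	desc->tx_submit(desc);
 *	chan->device->device_issue_pending(chan);
 */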