/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 *
 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8
39
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};
54
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
65struct d40_lli_pool {
66 void *base;
67 int size;
68 /* Space for dst and src, plus an extra for padding */
69 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
70};
71
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of LLIs of the current descriptor.
 * @lli_count: Number of transferred LLIs.
 * @lli_tx_len: Max number of LLIs per transfer; there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
92
93struct d40_desc {
94 /* LLI physical */
95 struct d40_phy_lli_bidir lli_phy;
96 /* LLI logical */
97 struct d40_log_lli_bidir lli_log;
98
99 struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_count;
	u32 lli_tx_len;

	struct dma_async_tx_descriptor txd;
105 struct list_head node;
106
107 enum dma_data_direction dir;
108 bool is_in_client_list;
109};
110
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA.
 * @phy: Physical base address of LCLA.
 * @base_size: Size of LCLA.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equals the
 * number of physical channels.
 */
122struct d40_lcla_pool {
123 void *base;
124 dma_addr_t phy;
125 resource_size_t base_size;
126 spinlock_t lock;
127 u32 *alloc_map;
128 int num_blocks;
129};
130
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as the
 * event line number. allocated_src and allocated_dst cannot both be
 * allocated to a physical channel, since the interrupt handler would then
 * have no way of figuring out which one the interrupt belongs to.
 */
145struct d40_phy_res {
146 spinlock_t lock;
147 int num;
148 u32 allocated_src;
149 u32 allocated_dst;
150};
151
152struct d40_base;
153
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst/src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
182struct d40_chan {
183 spinlock_t lock;
184 int log_num;
185 /* ID of the most recent completed transfer */
186 int completed;
187 int pending_tx;
188 bool busy;
189 struct d40_phy_res *phy_chan;
190 struct dma_chan chan;
191 struct tasklet_struct tasklet;
192 struct list_head client;
193 struct list_head active;
194 struct list_head queue;
	struct stedma40_chan_cfg dma_cfg;
196 struct d40_base *base;
197 /* Default register configurations */
198 u32 src_def_cfg;
199 u32 dst_def_cfg;
200 struct d40_def_lcsp log_def;
201 struct d40_lcla_elem lcla;
202 struct d40_log_lli_full *lcpa;
203};
204
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
240struct d40_base {
241 spinlock_t interrupt_lock;
242 spinlock_t execmd_lock;
243 struct device *dev;
244 void __iomem *virtbase;
245 struct clk *clk;
246 phys_addr_t phy_start;
247 resource_size_t phy_size;
248 int irq;
249 int num_phy_chans;
250 int num_log_chans;
251 struct dma_device dma_both;
252 struct dma_device dma_slave;
253 struct dma_device dma_memcpy;
254 struct d40_chan *phy_chans;
255 struct d40_chan *log_chans;
256 struct d40_chan **lookup_log_chans;
257 struct d40_chan **lookup_phy_chans;
258 struct stedma40_platform_data *plat_data;
259 /* Physical half channels */
260 struct d40_phy_res *phy_res;
261 struct d40_lcla_pool lcla_pool;
262 void *lcpa_base;
263 dma_addr_t phy_lcpa;
264 resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
};
267
268/**
269 * struct d40_interrupt_lookup - lookup table for interrupt handler
270 *
271 * @src: Interrupt mask register.
272 * @clr: Interrupt clear register.
273 * @is_error: true if this is an error interrupt.
274 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
275 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
276 */
277struct d40_interrupt_lookup {
278 u32 src;
279 u32 clr;
280 bool is_error;
281 int offset;
282};
283
284/**
285 * struct d40_reg_val - simple lookup struct
286 *
287 * @reg: The register.
288 * @val: The value that belongs to the register in reg.
289 */
290struct d40_reg_val {
291 unsigned int reg;
292 unsigned int val;
293};
294
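/*
 * Allocate LLI memory for a descriptor: single-link transfers use the
 * pre-allocated area inside the descriptor, longer chains fall back to
 * kmalloc (GFP_NOWAIT, since this may run with a spinlock held).
 */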
295static int d40_pool_lli_alloc(struct d40_desc *d40d,
296 int lli_len, bool is_log)
297{
298 u32 align;
299 void *base;
300
301 if (is_log)
302 align = sizeof(struct d40_log_lli);
303 else
304 align = sizeof(struct d40_phy_lli);
305
306 if (lli_len == 1) {
307 base = d40d->lli_pool.pre_alloc_lli;
308 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
309 d40d->lli_pool.base = NULL;
310 } else {
311 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
312
313 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
314 d40d->lli_pool.base = base;
315
316 if (d40d->lli_pool.base == NULL)
317 return -ENOMEM;
318 }
319
320 if (is_log) {
321 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
322 align);
323 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
324 align);
325 } else {
326 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
327 align);
328 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
329 align);
330
331 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
332 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
333 }
334
335 return 0;
336}
337
338static void d40_pool_lli_free(struct d40_desc *d40d)
339{
340 kfree(d40d->lli_pool.base);
341 d40d->lli_pool.base = NULL;
342 d40d->lli_pool.size = 0;
343 d40d->lli_log.src = NULL;
344 d40d->lli_log.dst = NULL;
345 d40d->lli_phy.src = NULL;
346 d40d->lli_phy.dst = NULL;
347 d40d->lli_phy.src_addr = 0;
348 d40d->lli_phy.dst_addr = 0;
349}
350
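/* Hand out the next DMA engine cookie, skipping negative values on wrap. */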
351static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
352 struct d40_desc *desc)
353{
354 dma_cookie_t cookie = d40c->chan.cookie;
355
356 if (++cookie < 0)
357 cookie = 1;
358
359 d40c->chan.cookie = cookie;
360 desc->txd.cookie = cookie;
361
362 return cookie;
363}
364
static void d40_desc_remove(struct d40_desc *d40d)
366{
367 list_del(&d40d->node);
368}
369
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				break;
			}
	} else {
		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
		if (d != NULL) {
			memset(d, 0, sizeof(struct d40_desc));
			INIT_LIST_HEAD(&d->node);
		}
	}
	return d;
}
391
392static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
393{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
396
397static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
398{
399 list_add_tail(&desc->node, &d40c->active);
400}
401
402static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
403{
404 struct d40_desc *d;
405
406 if (list_empty(&d40c->active))
407 return NULL;
408
409 d = list_first_entry(&d40c->active,
410 struct d40_desc,
411 node);
412 return d;
413}
414
415static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
416{
417 list_add_tail(&desc->node, &d40c->queue);
418}
419
420static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
421{
422 struct d40_desc *d;
423
424 if (list_empty(&d40c->queue))
425 return NULL;
426
427 d = list_first_entry(&d40c->queue,
428 struct d40_desc,
429 node);
430 return d;
431}
432
433/* Support functions for logical channels */
434
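/*
 * Reserve one src and one dst LCLA entry for this channel from the
 * per-physical-channel allocation bitmap in the LCLA pool.
 */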
435static int d40_lcla_id_get(struct d40_chan *d40c,
436 struct d40_lcla_pool *pool)
437{
438 int src_id = 0;
439 int dst_id = 0;
440 struct d40_log_lli *lcla_lidx_base =
441 pool->base + d40c->phy_chan->num * 1024;
442 int i;
443 int lli_per_log = d40c->base->plat_data->llis_per_log;
	unsigned long flags;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
447 return 0;
448
449 if (pool->num_blocks > 32)
450 return -EINVAL;
451
	spin_lock_irqsave(&pool->lock, flags);

	for (i = 0; i < pool->num_blocks; i++) {
455 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
456 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
457 break;
458 }
459 }
460 src_id = i;
461 if (src_id >= pool->num_blocks)
462 goto err;
463
464 for (; i < pool->num_blocks; i++) {
465 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
466 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
467 break;
468 }
469 }
470
471 dst_id = i;
472 if (dst_id == src_id)
473 goto err;
474
475 d40c->lcla.src_id = src_id;
476 d40c->lcla.dst_id = dst_id;
477 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
478 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
479
480
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&pool->lock, flags);
	return -EINVAL;
486}
487
488static void d40_lcla_id_put(struct d40_chan *d40c,
489 struct d40_lcla_pool *pool,
490 int id)
491{
	unsigned long flags;
	if (id < 0)
		return;

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_lock_irqsave(&pool->lock, flags);
	pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
	spin_unlock_irqrestore(&pool->lock, flags);
}
503
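/*
 * Write a command (RUN/STOP/SUSPEND_REQ) for the physical channel and, for
 * suspend requests, poll until the hardware reports SUSPENDED or STOPPED,
 * or D40_SUSPEND_MAX_IT iterations have passed.
 */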
504static int d40_channel_execute_command(struct d40_chan *d40c,
505 enum d40_command command)
506{
507 int status, i;
508 void __iomem *active_reg;
509 int ret = 0;
510 unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
514
515 if (d40c->phy_chan->num % 2 == 0)
516 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
517 else
518 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
519
520 if (command == D40_DMA_SUSPEND_REQ) {
521 status = (readl(active_reg) &
522 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
523 D40_CHAN_POS(d40c->phy_chan->num);
524
525 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
526 goto done;
527 }
528
	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {
534
535 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
536 status = (readl(active_reg) &
537 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
538 D40_CHAN_POS(d40c->phy_chan->num);
539
540 cpu_relax();
541 /*
542 * Reduce the number of bus accesses while
543 * waiting for the DMA to suspend.
544 */
545 udelay(3);
546
547 if (status == D40_DMA_STOP ||
548 status == D40_DMA_SUSPENDED)
549 break;
550 }
551
552 if (i == D40_SUSPEND_MAX_IT) {
553 dev_err(&d40c->chan.dev->device,
554 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
555 __func__, d40c->phy_chan->num, d40c->log_num,
556 status);
557 dump_stack();
558 ret = -EBUSY;
559 }
560
561 }
562done:
563 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
564 return ret;
565}
566
567static void d40_term_all(struct d40_chan *d40c)
568{
	struct d40_desc *d40d;

	/* Release active descriptors */
572 while ((d40d = d40_first_active_get(d40c))) {
573 d40_desc_remove(d40d);
574
575 /* Return desc to free-list */
576 d40_desc_free(d40c, d40d);
577 }
578
579 /* Release queued descriptors waiting for transfer */
580 while ((d40d = d40_first_queued(d40c))) {
581 d40_desc_remove(d40d);
582
583 /* Return desc to free-list */
584 d40_desc_free(d40c, d40d);
585 }
586
	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
588 d40c->lcla.src_id);
589 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
590 d40c->lcla.dst_id);
591
592 d40c->pending_tx = 0;
593 d40c->busy = false;
594}
595
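/* Enable or disable the event lines (SSLNK/SDLNK) used by this channel. */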
596static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
597{
598 u32 val;
599 unsigned long flags;
600
	/* Note that disabling events requires the physical channel to be stopped */
	if (do_enable)
603 val = D40_ACTIVATE_EVENTLINE;
604 else
605 val = D40_DEACTIVATE_EVENTLINE;
606
607 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
608
609 /* Enable event line connected to device (or memcpy) */
610 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
611 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
612 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
613
614 writel((val << D40_EVENTLINE_POS(event)) |
615 ~D40_EVENTLINE_MASK(event),
616 d40c->base->virtbase + D40_DREG_PCBASE +
617 d40c->phy_chan->num * D40_DREG_PCDELTA +
618 D40_CHAN_REG_SSLNK);
619 }
620 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
621 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
622
623 writel((val << D40_EVENTLINE_POS(event)) |
624 ~D40_EVENTLINE_MASK(event),
625 d40c->base->virtbase + D40_DREG_PCBASE +
626 d40c->phy_chan->num * D40_DREG_PCDELTA +
627 D40_CHAN_REG_SDLNK);
628 }
629
630 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
631}
632
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
635 u32 val = 0;
636
637 /* If SSLNK or SDLNK is zero all events are disabled */
638 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
639 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
640 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
641 d40c->phy_chan->num * D40_DREG_PCDELTA +
642 D40_CHAN_REG_SSLNK);
643
644 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
645 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
646 d40c->phy_chan->num * D40_DREG_PCDELTA +
647 D40_CHAN_REG_SDLNK);
	return val;
}
650
651static void d40_config_enable_lidx(struct d40_chan *d40c)
652{
653 /* Set LIDX for lcla */
654 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
655 D40_SREG_ELEM_LOG_LIDX_MASK,
656 d40c->base->virtbase + D40_DREG_PCBASE +
657 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
658
659 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
660 D40_SREG_ELEM_LOG_LIDX_MASK,
661 d40c->base->virtbase + D40_DREG_PCBASE +
662 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
663}
664
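/*
 * Suspend the physical channel and write its mode (logical/physical),
 * option and default configuration registers.
 */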
665static int d40_config_write(struct d40_chan *d40c)
666{
667 u32 addr_base;
668 u32 var;
669 int res;
670
671 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
672 if (res)
673 return res;
674
675 /* Odd addresses are even addresses + 4 */
676 addr_base = (d40c->phy_chan->num % 2) * 4;
677 /* Setup channel mode to logical or physical */
678 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
679 D40_CHAN_POS(d40c->phy_chan->num);
680 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
681
682 /* Setup operational mode option register */
683 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
684 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
685
686 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
687
688 if (d40c->log_num != D40_PHY_CHAN) {
689 /* Set default config for CFG reg */
690 writel(d40c->src_def_cfg,
691 d40c->base->virtbase + D40_DREG_PCBASE +
692 d40c->phy_chan->num * D40_DREG_PCDELTA +
693 D40_CHAN_REG_SSCFG);
694 writel(d40c->dst_def_cfg,
695 d40c->base->virtbase + D40_DREG_PCBASE +
696 d40c->phy_chan->num * D40_DREG_PCDELTA +
697 D40_CHAN_REG_SDCFG);
698
699 d40_config_enable_lidx(d40c);
700 }
701 return res;
702}
703
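/* Load the next batch of LLIs for this descriptor into the hardware. */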
704static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
705{
706
707 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
708 d40_phy_lli_write(d40c->base->virtbase,
709 d40c->phy_chan->num,
710 d40d->lli_phy.dst,
711 d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
714 struct d40_log_lli *dst = d40d->lli_log.dst;
715
		src += d40d->lli_count;
		dst += d40d->lli_count;
		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
719 d40c->lcla.dst,
720 dst, src,
721 d40c->base->plat_data->llis_per_log);
	}
	d40d->lli_count += d40d->lli_tx_len;
}
725
726static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
727{
728 struct d40_chan *d40c = container_of(tx->chan,
729 struct d40_chan,
730 chan);
731 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
732 unsigned long flags;
733
734 spin_lock_irqsave(&d40c->lock, flags);
735
736 tx->cookie = d40_assign_cookie(d40c, d40d);
737
738 d40_desc_queue(d40c, d40d);
739
740 spin_unlock_irqrestore(&d40c->lock, flags);
741
742 return tx->cookie;
743}
744
745static int d40_start(struct d40_chan *d40c)
746{
	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
752
753static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
754{
755 struct d40_desc *d40d;
756 int err;
757
758 /* Start queued jobs, if any */
759 d40d = d40_first_queued(d40c);
760
761 if (d40d != NULL) {
762 d40c->busy = true;
763
764 /* Remove from queue */
765 d40_desc_remove(d40d);
766
767 /* Add to active queue */
768 d40_desc_submit(d40c, d40d);
769
770 /* Initiate DMA job */
771 d40_desc_load(d40c, d40d);
772
773 /* Start dma job */
774 err = d40_start(d40c);
775
776 if (err)
777 return NULL;
778 }
779
780 return d40d;
781}
782
783/* called from interrupt context */
784static void dma_tc_handle(struct d40_chan *d40c)
785{
786 struct d40_desc *d40d;
787
788 if (!d40c->phy_chan)
789 return;
790
791 /* Get first active entry from list */
792 d40d = d40_first_active_get(d40c);
793
794 if (d40d == NULL)
795 return;
796
	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
800 /* Start dma job */
801 (void) d40_start(d40c);
802 return;
803 }
804
805 if (d40_queue_start(d40c) == NULL)
806 d40c->busy = false;
807
808 d40c->pending_tx++;
809 tasklet_schedule(&d40c->tasklet);
810
811}
812
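/*
 * Tasklet scheduled from dma_tc_handle(): records the completed cookie,
 * recycles or hands the finished descriptor to the client list, and
 * invokes the client callback outside the channel lock.
 */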
813static void dma_tasklet(unsigned long data)
814{
815 struct d40_chan *d40c = (struct d40_chan *) data;
816 struct d40_desc *d40d_fin;
817 unsigned long flags;
818 dma_async_tx_callback callback;
819 void *callback_param;
820
821 spin_lock_irqsave(&d40c->lock, flags);
822
823 /* Get first active entry from list */
824 d40d_fin = d40_first_active_get(d40c);
825
826 if (d40d_fin == NULL)
827 goto err;
828
829 d40c->completed = d40d_fin->txd.cookie;
830
831 /*
832 * If terminating a channel pending_tx is set to zero.
833 * This prevents any finished active jobs to return to the client.
834 */
835 if (d40c->pending_tx == 0) {
836 spin_unlock_irqrestore(&d40c->lock, flags);
837 return;
838 }
839
840 /* Callback to client */
841 callback = d40d_fin->txd.callback;
842 callback_param = d40d_fin->txd.callback_param;
843
844 if (async_tx_test_ack(&d40d_fin->txd)) {
845 d40_pool_lli_free(d40d_fin);
846 d40_desc_remove(d40d_fin);
847 /* Return desc to free-list */
848 d40_desc_free(d40c, d40d_fin);
849 } else {
		if (!d40d_fin->is_in_client_list) {
851 d40_desc_remove(d40d_fin);
852 list_add_tail(&d40d_fin->node, &d40c->client);
853 d40d_fin->is_in_client_list = true;
854 }
855 }
856
857 d40c->pending_tx--;
858
859 if (d40c->pending_tx)
860 tasklet_schedule(&d40c->tasklet);
861
862 spin_unlock_irqrestore(&d40c->lock, flags);
863
864 if (callback)
865 callback(callback_param);
866
867 return;
868
869 err:
	/* Rescue maneuver if receiving double interrupts */
871 if (d40c->pending_tx > 0)
872 d40c->pending_tx--;
873 spin_unlock_irqrestore(&d40c->lock, flags);
874}
875
876static irqreturn_t d40_handle_interrupt(int irq, void *data)
877{
878 static const struct d40_interrupt_lookup il[] = {
879 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
880 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
881 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
882 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
883 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
884 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
885 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
886 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
887 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
888 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
889 };
890
891 int i;
892 u32 regs[ARRAY_SIZE(il)];
893 u32 tmp;
894 u32 idx;
895 u32 row;
896 long chan = -1;
897 struct d40_chan *d40c;
898 unsigned long flags;
899 struct d40_base *base = data;
900
901 spin_lock_irqsave(&base->interrupt_lock, flags);
902
903 /* Read interrupt status of both logical and physical channels */
904 for (i = 0; i < ARRAY_SIZE(il); i++)
905 regs[i] = readl(base->virtbase + il[i].src);
906
907 for (;;) {
908
909 chan = find_next_bit((unsigned long *)regs,
910 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
911
912 /* No more set bits found? */
913 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
914 break;
915
916 row = chan / BITS_PER_LONG;
917 idx = chan & (BITS_PER_LONG - 1);
918
919 /* ACK interrupt */
920 tmp = readl(base->virtbase + il[row].clr);
921 tmp |= 1 << idx;
922 writel(tmp, base->virtbase + il[row].clr);
923
924 if (il[row].offset == D40_PHY_CHAN)
925 d40c = base->lookup_phy_chans[idx];
926 else
927 d40c = base->lookup_log_chans[il[row].offset + idx];
928 spin_lock(&d40c->lock);
929
930 if (!il[row].is_error)
931 dma_tc_handle(d40c);
932 else
933 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
934 __func__, chan, il[row].offset, idx);
935
936 spin_unlock(&d40c->lock);
937 }
938
939 spin_unlock_irqrestore(&base->interrupt_lock, flags);
940
941 return IRQ_HANDLED;
942}
943
944
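/* Sanity check a client channel configuration against the chosen direction. */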
945static int d40_validate_conf(struct d40_chan *d40c,
946 struct stedma40_chan_cfg *conf)
947{
948 int res = 0;
949 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
950 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
951 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
952 == STEDMA40_CHANNEL_IN_LOG_MODE;
953
954 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
955 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
956 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
957 __func__);
958 res = -EINVAL;
959 }
960
961 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
962 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
963 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
964 __func__);
965 res = -EINVAL;
966 }
967
968 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
969 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
970 dev_err(&d40c->chan.dev->device,
971 "[%s] No event line\n", __func__);
972 res = -EINVAL;
973 }
974
975 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
976 (src_event_group != dst_event_group)) {
977 dev_err(&d40c->chan.dev->device,
978 "[%s] Invalid event group\n", __func__);
979 res = -EINVAL;
980 }
981
982 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
983 /*
984 * DMAC HW supports it. Will be added to this driver,
985 * in case any dma client requires it.
986 */
987 dev_err(&d40c->chan.dev->device,
988 "[%s] periph to periph not supported\n",
989 __func__);
990 res = -EINVAL;
991 }
992
993 return res;
994}
995
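/*
 * Try to reserve a physical channel half (src or dst). A physical channel
 * claims both halves; a logical channel sets one event-line bit in
 * allocated_src or allocated_dst.
 */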
996static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
999 unsigned long flags;
1000 spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
1003 if (phy->allocated_src == D40_ALLOC_FREE &&
1004 phy->allocated_dst == D40_ALLOC_FREE) {
1005 phy->allocated_dst = D40_ALLOC_PHY;
1006 phy->allocated_src = D40_ALLOC_PHY;
1007 goto found;
1008 } else
1009 goto not_found;
1010 }
1011
1012 /* Logical channel */
1013 if (is_src) {
1014 if (phy->allocated_src == D40_ALLOC_PHY)
1015 goto not_found;
1016
1017 if (phy->allocated_src == D40_ALLOC_FREE)
1018 phy->allocated_src = D40_ALLOC_LOG_FREE;
1019
1020 if (!(phy->allocated_src & (1 << log_event_line))) {
1021 phy->allocated_src |= 1 << log_event_line;
1022 goto found;
1023 } else
1024 goto not_found;
1025 } else {
1026 if (phy->allocated_dst == D40_ALLOC_PHY)
1027 goto not_found;
1028
1029 if (phy->allocated_dst == D40_ALLOC_FREE)
1030 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1031
1032 if (!(phy->allocated_dst & (1 << log_event_line))) {
1033 phy->allocated_dst |= 1 << log_event_line;
1034 goto found;
1035 } else
1036 goto not_found;
1037 }
1038
1039not_found:
1040 spin_unlock_irqrestore(&phy->lock, flags);
1041 return false;
1042found:
1043 spin_unlock_irqrestore(&phy->lock, flags);
1044 return true;
1045}
1046
1047static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1048 int log_event_line)
1049{
1050 unsigned long flags;
1051 bool is_free = false;
1052
1053 spin_lock_irqsave(&phy->lock, flags);
1054 if (!log_event_line) {
1055 /* Physical interrupts are masked per physical full channel */
1056 phy->allocated_dst = D40_ALLOC_FREE;
1057 phy->allocated_src = D40_ALLOC_FREE;
1058 is_free = true;
1059 goto out;
1060 }
1061
1062 /* Logical channel */
1063 if (is_src) {
1064 phy->allocated_src &= ~(1 << log_event_line);
1065 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1066 phy->allocated_src = D40_ALLOC_FREE;
1067 } else {
1068 phy->allocated_dst &= ~(1 << log_event_line);
1069 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1070 phy->allocated_dst = D40_ALLOC_FREE;
1071 }
1072
1073 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1074 D40_ALLOC_FREE);
1075
1076out:
1077 spin_unlock_irqrestore(&phy->lock, flags);
1078
1079 return is_free;
1080}
1081
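/*
 * Pick a physical channel for this client: a dedicated one for physical
 * mode, or spread logical channels across the event group's channels.
 */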
1082static int d40_allocate_channel(struct d40_chan *d40c)
1083{
1084 int dev_type;
1085 int event_group;
1086 int event_line;
1087 struct d40_phy_res *phys;
1088 int i;
1089 int j;
1090 int log_num;
1091 bool is_src;
1092 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1093 == STEDMA40_CHANNEL_IN_LOG_MODE;
1094
1095
1096 phys = d40c->base->phy_res;
1097
1098 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1099 dev_type = d40c->dma_cfg.src_dev_type;
1100 log_num = 2 * dev_type;
1101 is_src = true;
1102 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1103 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1104 /* dst event lines are used for logical memcpy */
1105 dev_type = d40c->dma_cfg.dst_dev_type;
1106 log_num = 2 * dev_type + 1;
1107 is_src = false;
1108 } else
1109 return -EINVAL;
1110
1111 event_group = D40_TYPE_TO_GROUP(dev_type);
1112 event_line = D40_TYPE_TO_EVENT(dev_type);
1113
1114 if (!is_log) {
1115 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1116 /* Find physical half channel */
1117 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1118
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
1122 }
1123 } else
1124 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1125 int phy_num = j + event_group * 2;
1126 for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
1130 }
1131 }
1132 return -EINVAL;
1133found_phy:
1134 d40c->phy_chan = &phys[i];
1135 d40c->log_num = D40_PHY_CHAN;
1136 goto out;
1137 }
1138 if (dev_type == -1)
1139 return -EINVAL;
1140
1141 /* Find logical channel */
1142 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1143 int phy_num = j + event_group * 2;
1144 /*
1145 * Spread logical channels across all available physical rather
1146 * than pack every logical channel at the first available phy
1147 * channels.
1148 */
1149 if (is_src) {
1150 for (i = phy_num; i < phy_num + 2; i++) {
1151 if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
1154 }
1155 } else {
1156 for (i = phy_num + 1; i >= phy_num; i--) {
1157 if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
1160 }
1161 }
1162 }
1163 return -EINVAL;
1164
1165found_log:
1166 d40c->phy_chan = &phys[i];
1167 d40c->log_num = log_num;
1168out:
1169
1170 if (is_log)
1171 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1172 else
1173 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1174
1175 return 0;
1176
1177}
1178
static int d40_config_memcpy(struct d40_chan *d40c)
1180{
1181 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1182
1183 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1184 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1185 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1186 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1187 memcpy[d40c->chan.chan_id];
1188
1189 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1190 dma_has_cap(DMA_SLAVE, cap)) {
1191 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1192 } else {
1193 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1194 __func__);
1195 return -EINVAL;
1196 }
1197
1198 return 0;
1199}
1200
1201
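/*
 * Release a channel: terminate all jobs, free client owned descriptors,
 * stop the event line or physical channel and clear the lookup tables.
 */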
1202static int d40_free_dma(struct d40_chan *d40c)
1203{
1204
1205 int res = 0;
1206 u32 event, dir;
1207 struct d40_phy_res *phy = d40c->phy_chan;
1208 bool is_src;
	struct d40_desc *d;
1210 struct d40_desc *_d;
1211

	/* Terminate all queued and active transfers */
1214 d40_term_all(d40c);
1215
	/* Release client owned descriptors */
1217 if (!list_empty(&d40c->client))
1218 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1219 d40_pool_lli_free(d);
1220 d40_desc_remove(d);
1221 /* Return desc to free-list */
1222 d40_desc_free(d40c, d);
1223 }
1224
	if (phy == NULL) {
1226 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1227 __func__);
1228 return -EINVAL;
1229 }
1230
1231 if (phy->allocated_src == D40_ALLOC_FREE &&
1232 phy->allocated_dst == D40_ALLOC_FREE) {
1233 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1234 __func__);
1235 return -EINVAL;
1236 }
1237
	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
1242 return res;
1243 }
1244
1245 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1246 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1247 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1248 dir = D40_CHAN_REG_SDLNK;
1249 is_src = false;
1250 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1251 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1252 dir = D40_CHAN_REG_SSLNK;
1253 is_src = true;
1254 } else {
1255 dev_err(&d40c->chan.dev->device,
1256 "[%s] Unknown direction\n", __func__);
1257 return -EINVAL;
1258 }
1259
1260 if (d40c->log_num != D40_PHY_CHAN) {
1261 /*
1262 * Release logical channel, deactivate the event line during
1263 * the time physical res is suspended.
1264 */
1265 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1266 D40_EVENTLINE_MASK(event),
1267 d40c->base->virtbase + D40_DREG_PCBASE +
1268 phy->num * D40_DREG_PCDELTA + dir);
1269
1270 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1271
1272 /*
1273 * Check if there are more logical allocation
1274 * on this phy channel.
1275 */
1276 if (!d40_alloc_mask_free(phy, is_src, event)) {
1277 /* Resume the other logical channels if any */
1278 if (d40_chan_has_events(d40c)) {
1279 res = d40_channel_execute_command(d40c,
1280 D40_DMA_RUN);
1281 if (res) {
1282 dev_err(&d40c->chan.dev->device,
1283 "[%s] Executing RUN command\n",
1284 __func__);
1285 return res;
1286 }
1287 }
1288 return 0;
1289 }
1290 } else
1291 d40_alloc_mask_free(phy, is_src, 0);
1292
1293 /* Release physical channel */
1294 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1295 if (res) {
1296 dev_err(&d40c->chan.dev->device,
1297 "[%s] Failed to stop channel\n", __func__);
1298 return res;
1299 }
1300 d40c->phy_chan = NULL;
1301 /* Invalidate channel type */
1302 d40c->dma_cfg.channel_type = 0;
1303 d40c->base->lookup_phy_chans[phy->num] = NULL;
1304
1305 return 0;
}
1307
1308static int d40_pause(struct dma_chan *chan)
1309{
1310 struct d40_chan *d40c =
1311 container_of(chan, struct d40_chan, chan);
1312 int res;
	unsigned long flags;
1314
1315 spin_lock_irqsave(&d40c->lock, flags);
1316
1317 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1318 if (res == 0) {
1319 if (d40c->log_num != D40_PHY_CHAN) {
1320 d40_config_set_event(d40c, false);
1321 /* Resume the other logical channels if any */
1322 if (d40_chan_has_events(d40c))
1323 res = d40_channel_execute_command(d40c,
1324 D40_DMA_RUN);
1325 }
1326 }
1327
1328 spin_unlock_irqrestore(&d40c->lock, flags);
1329 return res;
1330}
1331
static bool d40_is_paused(struct d40_chan *d40c)
1333{
1334 bool is_paused = false;
1335 unsigned long flags;
1336 void __iomem *active_reg;
1337 u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);
1341
1342 if (d40c->log_num == D40_PHY_CHAN) {
1343 if (d40c->phy_chan->num % 2 == 0)
1344 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1345 else
1346 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1347
1348 status = (readl(active_reg) &
1349 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1350 D40_CHAN_POS(d40c->phy_chan->num);
1351 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1352 is_paused = true;
1353
1354 goto _exit;
1355 }
1356
	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1358 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1359 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1360 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1361 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1362 else {
1363 dev_err(&d40c->chan.dev->device,
1364 "[%s] Unknown direction\n", __func__);
1365 goto _exit;
1366 }
1367 status = d40_chan_has_events(d40c);
1368 status = (status & D40_EVENTLINE_MASK(event)) >>
1369 D40_EVENTLINE_POS(event);
1370
1371 if (status != D40_DMA_RUN)
1372 is_paused = true;
_exit:
1374 spin_unlock_irqrestore(&d40c->lock, flags);
1375 return is_paused;
1376
1377}
1378
1379
static bool d40_tx_is_linked(struct d40_chan *d40c)
1381{
1382 bool is_link;
1383
1384 if (d40c->log_num != D40_PHY_CHAN)
1385 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1386 else
1387 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1388 d40c->phy_chan->num * D40_DREG_PCDELTA +
1389 D40_CHAN_REG_SDLNK) &
1390 D40_SREG_LNK_PHYS_LNK_MASK;
1391 return is_link;
1392}
1393
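/*
 * Bytes left to transfer: element count times data width, read from the
 * LCPA (logical channel) or the channel registers (physical channel).
 */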
1394static u32 d40_residue(struct d40_chan *d40c)
1395{
1396 u32 num_elt;
1397
1398 if (d40c->log_num != D40_PHY_CHAN)
1399 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1400 >> D40_MEM_LCSP2_ECNT_POS;
1401 else
1402 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1403 d40c->phy_chan->num * D40_DREG_PCDELTA +
1404 D40_CHAN_REG_SDELT) &
1405 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1406 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1407}
1408
1409static int d40_resume(struct dma_chan *chan)
1410{
1411 struct d40_chan *d40c =
1412 container_of(chan, struct d40_chan, chan);
1413 int res = 0;
1414 unsigned long flags;
1415
1416 spin_lock_irqsave(&d40c->lock, flags);
1417
	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
1426 return res;
1427}
1428
1429static u32 stedma40_residue(struct dma_chan *chan)
1430{
1431 struct d40_chan *d40c =
1432 container_of(chan, struct d40_chan, chan);
1433 u32 bytes_left;
1434 unsigned long flags;
1435
1436 spin_lock_irqsave(&d40c->lock, flags);
1437 bytes_left = d40_residue(d40c);
1438 spin_unlock_irqrestore(&d40c->lock, flags);
1439
1440 return bytes_left;
1441}
1442
1443/* Public DMA functions in addition to the DMA engine framework */
1444
1445int stedma40_set_psize(struct dma_chan *chan,
1446 int src_psize,
1447 int dst_psize)
1448{
1449 struct d40_chan *d40c =
1450 container_of(chan, struct d40_chan, chan);
1451 unsigned long flags;
1452
1453 spin_lock_irqsave(&d40c->lock, flags);
1454
1455 if (d40c->log_num != D40_PHY_CHAN) {
1456 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1457 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1458 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1459 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1460 goto out;
1461 }
1462
1463 if (src_psize == STEDMA40_PSIZE_PHY_1)
1464 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1465 else {
1466 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1467 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1468 D40_SREG_CFG_PSIZE_POS);
1469 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1470 }
1471
1472 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1473 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1474 else {
1475 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1476 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1477 D40_SREG_CFG_PSIZE_POS);
1478 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1479 }
1480out:
1481 spin_unlock_irqrestore(&d40c->lock, flags);
1482 return 0;
1483}
1484EXPORT_SYMBOL(stedma40_set_psize);
1485
1486struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1487 struct scatterlist *sgl_dst,
1488 struct scatterlist *sgl_src,
1489 unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = d40d->lli_len;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

		if (sgl_len > 1)
			/*
			 * Check if there is space available in lcla. If not,
			 * split list into 1-length and run only in lcpa
			 * space.
			 */
			if (d40_lcla_id_get(d40c,
					    &d40c->base->lcla_pool) != 0)
				d40d->lli_tx_len = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1529 dev_err(&d40c->chan.dev->device,
1530 "[%s] Out of memory\n", __func__);
1531 goto err;
1532 }
1533
1534 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1535 sgl_src,
1536 sgl_len,
1537 d40d->lli_log.src,
1538 d40c->log_def.lcsp1,
1539 d40c->dma_cfg.src_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);
1543
1544 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1545 sgl_dst,
1546 sgl_len,
1547 d40d->lli_log.dst,
1548 d40c->log_def.lcsp3,
1549 d40c->dma_cfg.dst_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);
1553
1554
1555 } else {
1556 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1557 dev_err(&d40c->chan.dev->device,
1558 "[%s] Out of memory\n", __func__);
1559 goto err;
1560 }
1561
1562 res = d40_phy_sg_to_lli(sgl_src,
1563 sgl_len,
1564 0,
1565 d40d->lli_phy.src,
1566 d40d->lli_phy.src_addr,
1567 d40c->src_def_cfg,
1568 d40c->dma_cfg.src_info.data_width,
1569 d40c->dma_cfg.src_info.psize,
1570 true);
1571
1572 if (res < 0)
1573 goto err;
1574
1575 res = d40_phy_sg_to_lli(sgl_dst,
1576 sgl_len,
1577 0,
1578 d40d->lli_phy.dst,
1579 d40d->lli_phy.dst_addr,
1580 d40c->dst_def_cfg,
1581 d40c->dma_cfg.dst_info.data_width,
1582 d40c->dma_cfg.dst_info.psize,
1583 true);
1584
1585 if (res < 0)
1586 goto err;
1587
1588 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1589 d40d->lli_pool.size, DMA_TO_DEVICE);
1590 }
1591
1592 dma_async_tx_descriptor_init(&d40d->txd, chan);
1593
1594 d40d->txd.tx_submit = d40_tx_submit;
1595
	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
1599err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
1602}
1603EXPORT_SYMBOL(stedma40_memcpy_sg);
1604
1605bool stedma40_filter(struct dma_chan *chan, void *data)
1606{
1607 struct stedma40_chan_cfg *info = data;
1608 struct d40_chan *d40c =
1609 container_of(chan, struct d40_chan, chan);
1610 int err;
1611
1612 if (data) {
1613 err = d40_validate_conf(d40c, info);
1614 if (!err)
1615 d40c->dma_cfg = *info;
1616 } else
1617 err = d40_config_memcpy(d40c);
1618
1619 return err == 0;
1620}
1621EXPORT_SYMBOL(stedma40_filter);
1622
1623/* DMA ENGINE functions */
1624static int d40_alloc_chan_resources(struct dma_chan *chan)
1625{
1626 int err;
1627 unsigned long flags;
1628 struct d40_chan *d40c =
1629 container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);
1632
1633 d40c->completed = chan->cookie = 1;
1634
1635 /*
1636 * If no dma configuration is set (channel_type == 0)
	 * use default configuration (memcpy)
	 */
1639 if (d40c->dma_cfg.channel_type == 0) {
1640 err = d40_config_memcpy(d40c);
		if (err) {
1642 dev_err(&d40c->chan.dev->device,
1643 "[%s] Failed to configure memcpy channel\n",
1644 __func__);
1645 goto fail;
1646 }
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
1651 if (err) {
1652 dev_err(&d40c->chan.dev->device,
1653 "[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}
1656
	/* Fill in basic CFG register values */
1658 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1659 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1660
1661 if (d40c->log_num != D40_PHY_CHAN) {
1662 d40_log_cfg(&d40c->dma_cfg,
1663 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1664
1665 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1666 d40c->lcpa = d40c->base->lcpa_base +
1667 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1668 else
1669 d40c->lcpa = d40c->base->lcpa_base +
1670 d40c->dma_cfg.dst_dev_type *
1671 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1672 }
1673
1674 /*
1675 * Only write channel configuration to the DMA if the physical
1676 * resource is free. In case of multiple logical channels
1677 * on the same physical resource, only the first write is necessary.
1678 */
1679 if (is_free_phy) {
1680 err = d40_config_write(d40c);
1681 if (err) {
1682 dev_err(&d40c->chan.dev->device,
1683 "[%s] Failed to configure channel\n",
1684 __func__);
1685 }
	}
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
1691
1692static void d40_free_chan_resources(struct dma_chan *chan)
1693{
1694 struct d40_chan *d40c =
1695 container_of(chan, struct d40_chan, chan);
1696 int err;
1697 unsigned long flags;
1698
	if (d40c->phy_chan == NULL) {
1700 dev_err(&d40c->chan.dev->device,
1701 "[%s] Cannot free unallocated channel\n", __func__);
1702 return;
1703 }
1704
1705
	spin_lock_irqsave(&d40c->lock, flags);
1707
1708 err = d40_free_dma(d40c);
1709
1710 if (err)
1711 dev_err(&d40c->chan.dev->device,
1712 "[%s] Failed to free channel\n", __func__);
1713 spin_unlock_irqrestore(&d40c->lock, flags);
1714}
1715
1716static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1717 dma_addr_t dst,
1718 dma_addr_t src,
1719 size_t size,
							unsigned long dma_flags)
{
1722 struct d40_desc *d40d;
1723 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1724 chan);
	unsigned long flags;
	int err = 0;
1727
	if (d40c->phy_chan == NULL) {
1729 dev_err(&d40c->chan.dev->device,
1730 "[%s] Channel is not allocated.\n", __func__);
1731 return ERR_PTR(-EINVAL);
1732 }
1733
	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);
1736
1737 if (d40d == NULL) {
1738 dev_err(&d40c->chan.dev->device,
1739 "[%s] Descriptor is NULL\n", __func__);
1740 goto err;
1741 }
1742
	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);
1746
1747 d40d->txd.tx_submit = d40_tx_submit;
1748
1749 if (d40c->log_num != D40_PHY_CHAN) {
1750
1751 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1752 dev_err(&d40c->chan.dev->device,
1753 "[%s] Out of memory\n", __func__);
1754 goto err;
1755 }
1756 d40d->lli_len = 1;
		d40d->lli_tx_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
1760 src,
1761 size,
1762 0,
1763 d40c->log_def.lcsp1,
1764 d40c->dma_cfg.src_info.data_width,
				 false, true);

		d40_log_fill_lli(d40d->lli_log.dst,
1768 dst,
1769 size,
1770 0,
1771 d40c->log_def.lcsp3,
1772 d40c->dma_cfg.dst_info.data_width,
1773 true, true);
1774
1775 } else {
1776
1777 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1778 dev_err(&d40c->chan.dev->device,
1779 "[%s] Out of memory\n", __func__);
1780 goto err;
1781 }
1782
1783 err = d40_phy_fill_lli(d40d->lli_phy.src,
1784 src,
1785 size,
1786 d40c->dma_cfg.src_info.psize,
1787 0,
1788 d40c->src_def_cfg,
1789 true,
1790 d40c->dma_cfg.src_info.data_width,
1791 false);
1792 if (err)
1793 goto err_fill_lli;
1794
1795 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1796 dst,
1797 size,
1798 d40c->dma_cfg.dst_info.psize,
1799 0,
1800 d40c->dst_def_cfg,
1801 true,
1802 d40c->dma_cfg.dst_info.data_width,
1803 false);
1804
1805 if (err)
1806 goto err_fill_lli;
1807
1808 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1809 d40d->lli_pool.size, DMA_TO_DEVICE);
1810 }
1811
Jonas Aaberg2a614342010-06-20 21:25:24 +00001812 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001813 return &d40d->txd;
1814
1815err_fill_lli:
1816 dev_err(&d40c->chan.dev->device,
1817 "[%s] Failed filling in PHY LLI\n", __func__);
1818 d40_pool_lli_free(d40d);
1819err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001820 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001821 return NULL;
1822}
1823
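/*
 * Prepare a slave scatter/gather job for a logical channel: one LLI per
 * sg entry is allocated from the descriptor pool, the number of LLIs per
 * hardware transfer is capped to llis_per_log (or to 1 if no LCLA entry
 * is available), the device address is taken from the platform data based
 * on the transfer direction, and the list is translated by
 * d40_log_sg_to_dev().
 */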
1824static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1825 struct d40_chan *d40c,
1826 struct scatterlist *sgl,
1827 unsigned int sg_len,
1828 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001829 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001830{
1831 dma_addr_t dev_addr = 0;
1832 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001833
1834 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1835 dev_err(&d40c->chan.dev->device,
1836 "[%s] Out of memory\n", __func__);
1837 return -ENOMEM;
1838 }
1839
1840 d40d->lli_len = sg_len;
Per Friden941b77a2010-06-20 21:24:45 +00001841 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1842 d40d->lli_tx_len = d40d->lli_len;
1843 else
1844 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
Linus Walleij8d318a52010-03-30 15:33:42 +02001845
1846 if (sg_len > 1)
1847 /*
1848 * Check if there is space available in lcla.
1849			 * If not, split the job into single-LLI transfers
1850			 * that run only in LCPA space.
1851 */
1852 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001853 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001854
Jonas Aaberg2a614342010-06-20 21:25:24 +00001855 if (direction == DMA_FROM_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001856 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001857 else if (direction == DMA_TO_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001858 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001859 else
Linus Walleij8d318a52010-03-30 15:33:42 +02001860 return -EINVAL;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001861
1862 total_size = d40_log_sg_to_dev(&d40c->lcla,
1863 sgl, sg_len,
1864 &d40d->lli_log,
1865 &d40c->log_def,
1866 d40c->dma_cfg.src_info.data_width,
1867 d40c->dma_cfg.dst_info.data_width,
1868 direction,
1869 dma_flags & DMA_PREP_INTERRUPT,
1870 dev_addr, d40d->lli_tx_len,
1871 d40c->base->plat_data->llis_per_log);
1872
Linus Walleij8d318a52010-03-30 15:33:42 +02001873 if (total_size < 0)
1874 return -EINVAL;
1875
1876 return 0;
1877}
1878
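/*
 * Prepare a slave scatter/gather job for a physical channel: one LLI per
 * sg entry is built for both the src and dst side, the device address is
 * chosen from the platform data based on the direction, and the finished
 * LLI table is flushed to memory with dma_map_single().
 */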
1879static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1880 struct d40_chan *d40c,
1881 struct scatterlist *sgl,
1882 unsigned int sgl_len,
1883 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001884 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001885{
1886 dma_addr_t src_dev_addr;
1887 dma_addr_t dst_dev_addr;
1888 int res;
1889
1890 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1891 dev_err(&d40c->chan.dev->device,
1892 "[%s] Out of memory\n", __func__);
1893 return -ENOMEM;
1894 }
1895
1896 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001897 d40d->lli_tx_len = sgl_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02001898
1899 if (direction == DMA_FROM_DEVICE) {
1900 dst_dev_addr = 0;
1901 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1902 } else if (direction == DMA_TO_DEVICE) {
1903 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1904 src_dev_addr = 0;
1905 } else
1906 return -EINVAL;
1907
1908 res = d40_phy_sg_to_lli(sgl,
1909 sgl_len,
1910 src_dev_addr,
1911 d40d->lli_phy.src,
1912 d40d->lli_phy.src_addr,
1913 d40c->src_def_cfg,
1914 d40c->dma_cfg.src_info.data_width,
1915 d40c->dma_cfg.src_info.psize,
1916 true);
1917 if (res < 0)
1918 return res;
1919
1920 res = d40_phy_sg_to_lli(sgl,
1921 sgl_len,
1922 dst_dev_addr,
1923 d40d->lli_phy.dst,
1924 d40d->lli_phy.dst_addr,
1925 d40c->dst_def_cfg,
1926 d40c->dma_cfg.dst_info.data_width,
1927 d40c->dma_cfg.dst_info.psize,
1928 true);
1929 if (res < 0)
1930 return res;
1931
1932 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1933 d40d->lli_pool.size, DMA_TO_DEVICE);
1934 return 0;
1935}
1936
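/*
 * dmaengine prep_slave_sg callback: runs the optional platform
 * pre_transfer hook, fetches a free descriptor and dispatches to the
 * logical or physical helper above depending on the channel type.
 *
 * A rough sketch of client usage (assumptions: the channel was requested
 * with the DMA_SLAVE capability and a filter matching this controller,
 * and buf has already been set up for DMA):
 *
 *	struct scatterlist sg;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	sg_init_one(&sg, buf, len);
 *	dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
 *	tx = chan->device->device_prep_slave_sg(chan, &sg, 1,
 *						 DMA_TO_DEVICE,
 *						 DMA_PREP_INTERRUPT);
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */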
1937static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1938 struct scatterlist *sgl,
1939 unsigned int sg_len,
1940 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001941 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001942{
1943 struct d40_desc *d40d;
1944 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1945 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001946 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001947 int err;
1948
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001949 if (d40c->phy_chan == NULL) {
1950 dev_err(&d40c->chan.dev->device,
1951 "[%s] Cannot prepare unallocated channel\n", __func__);
1952 return ERR_PTR(-EINVAL);
1953 }
1954
Linus Walleij8d318a52010-03-30 15:33:42 +02001955 if (d40c->dma_cfg.pre_transfer)
1956 d40c->dma_cfg.pre_transfer(chan,
1957 d40c->dma_cfg.pre_transfer_data,
1958 sg_dma_len(sgl));
1959
Jonas Aaberg2a614342010-06-20 21:25:24 +00001960 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001961 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001962 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001963
1964 if (d40d == NULL)
1965 return NULL;
1966
Linus Walleij8d318a52010-03-30 15:33:42 +02001967 if (d40c->log_num != D40_PHY_CHAN)
1968 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001969 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001970 else
1971 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001972 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001973 if (err) {
1974 dev_err(&d40c->chan.dev->device,
1975 "[%s] Failed to prepare %s slave sg job: %d\n",
1976 __func__,
1977 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
1978 return NULL;
1979 }
1980
Jonas Aaberg2a614342010-06-20 21:25:24 +00001981 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001982
1983 dma_async_tx_descriptor_init(&d40d->txd, chan);
1984
1985 d40d->txd.tx_submit = d40_tx_submit;
1986
1987 return &d40d->txd;
1988}
1989
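/*
 * dmaengine tx_status callback: reports DMA_PAUSED when the channel is
 * paused, otherwise the usual cookie-based completion state, and fills
 * in the remaining byte count via stedma40_residue().
 */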
1990static enum dma_status d40_tx_status(struct dma_chan *chan,
1991 dma_cookie_t cookie,
1992 struct dma_tx_state *txstate)
1993{
1994 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1995 dma_cookie_t last_used;
1996 dma_cookie_t last_complete;
1997 int ret;
1998
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001999 if (d40c->phy_chan == NULL) {
2000 dev_err(&d40c->chan.dev->device,
2001 "[%s] Cannot read status of unallocated channel\n",
2002 __func__);
2003		return DMA_ERROR;
2004 }
2005
Linus Walleij8d318a52010-03-30 15:33:42 +02002006 last_complete = d40c->completed;
2007 last_used = chan->cookie;
2008
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002009 if (d40_is_paused(d40c))
2010 ret = DMA_PAUSED;
2011 else
2012 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002013
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002014 dma_set_tx_state(txstate, last_complete, last_used,
2015 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002016
2017 return ret;
2018}
2019
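/*
 * dmaengine issue_pending callback: start the queued jobs unless the
 * channel is already busy processing them.
 */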
2020static void d40_issue_pending(struct dma_chan *chan)
2021{
2022 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2023 unsigned long flags;
2024
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002025 if (d40c->phy_chan == NULL) {
2026 dev_err(&d40c->chan.dev->device,
2027 "[%s] Channel is not allocated!\n", __func__);
2028 return;
2029 }
2030
Linus Walleij8d318a52010-03-30 15:33:42 +02002031 spin_lock_irqsave(&d40c->lock, flags);
2032
2033 /* Busy means that pending jobs are already being processed */
2034 if (!d40c->busy)
2035 (void) d40_queue_start(d40c);
2036
2037 spin_unlock_irqrestore(&d40c->lock, flags);
2038}
2039
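/*
 * dmaengine device_control callback: implements DMA_TERMINATE_ALL,
 * DMA_PAUSE and DMA_RESUME; any other command returns -ENXIO.
 */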
Linus Walleij05827632010-05-17 16:30:42 -07002040static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2041 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002042{
2043 unsigned long flags;
2044 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2045
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002046 if (d40c->phy_chan == NULL) {
2047 dev_err(&d40c->chan.dev->device,
2048 "[%s] Channel is not allocated!\n", __func__);
2049 return -EINVAL;
2050 }
2051
Linus Walleij8d318a52010-03-30 15:33:42 +02002052 switch (cmd) {
2053 case DMA_TERMINATE_ALL:
2054 spin_lock_irqsave(&d40c->lock, flags);
2055 d40_term_all(d40c);
2056 spin_unlock_irqrestore(&d40c->lock, flags);
2057 return 0;
2058 case DMA_PAUSE:
2059 return d40_pause(chan);
2060 case DMA_RESUME:
2061 return d40_resume(chan);
2062 }
2063
2064 /* Other commands are unimplemented */
2065 return -ENXIO;
2066}
2067
2068/* Initialization functions */
2069
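/*
 * Initialize a range of d40_chan structures for one dma_device: set up
 * locks, job lists and tasklets and add each channel to the device's
 * channel list.
 */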
2070static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2071 struct d40_chan *chans, int offset,
2072 int num_chans)
2073{
2074 int i = 0;
2075 struct d40_chan *d40c;
2076
2077 INIT_LIST_HEAD(&dma->channels);
2078
2079 for (i = offset; i < offset + num_chans; i++) {
2080 d40c = &chans[i];
2081 d40c->base = base;
2082 d40c->chan.device = dma;
2083
2084 /* Invalidate lcla element */
2085 d40c->lcla.src_id = -1;
2086 d40c->lcla.dst_id = -1;
2087
2088 spin_lock_init(&d40c->lock);
2089
2090 d40c->log_num = D40_PHY_CHAN;
2091
Linus Walleij8d318a52010-03-30 15:33:42 +02002092 INIT_LIST_HEAD(&d40c->active);
2093 INIT_LIST_HEAD(&d40c->queue);
2094 INIT_LIST_HEAD(&d40c->client);
2095
Linus Walleij8d318a52010-03-30 15:33:42 +02002096 tasklet_init(&d40c->tasklet, dma_tasklet,
2097 (unsigned long) d40c);
2098
2099 list_add_tail(&d40c->chan.device_node,
2100 &dma->channels);
2101 }
2102}
2103
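/*
 * Register the three dma_device instances exposed by this driver:
 * dma_slave (logical channels, DMA_SLAVE only), dma_memcpy (logical
 * channels, DMA_MEMCPY only) and dma_both (reserved physical channels,
 * both capabilities).
 */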
2104static int __init d40_dmaengine_init(struct d40_base *base,
2105 int num_reserved_chans)
2106{
2107	int err;
2108
2109 d40_chan_init(base, &base->dma_slave, base->log_chans,
2110 0, base->num_log_chans);
2111
2112 dma_cap_zero(base->dma_slave.cap_mask);
2113 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2114
2115 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2116 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2117 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2118 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2119 base->dma_slave.device_tx_status = d40_tx_status;
2120 base->dma_slave.device_issue_pending = d40_issue_pending;
2121 base->dma_slave.device_control = d40_control;
2122 base->dma_slave.dev = base->dev;
2123
2124 err = dma_async_device_register(&base->dma_slave);
2125
2126 if (err) {
2127 dev_err(base->dev,
2128 "[%s] Failed to register slave channels\n",
2129 __func__);
2130 goto failure1;
2131 }
2132
2133 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2134 base->num_log_chans, base->plat_data->memcpy_len);
2135
2136 dma_cap_zero(base->dma_memcpy.cap_mask);
2137 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2138
2139 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2140 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2141 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2142 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2143 base->dma_memcpy.device_tx_status = d40_tx_status;
2144 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2145 base->dma_memcpy.device_control = d40_control;
2146 base->dma_memcpy.dev = base->dev;
2147 /*
2148	 * This controller can only access addresses at even
2149	 * 32-bit boundaries, i.e. with an alignment of 2^2 bytes.
2150 */
2151 base->dma_memcpy.copy_align = 2;
2152
2153 err = dma_async_device_register(&base->dma_memcpy);
2154
2155 if (err) {
2156 dev_err(base->dev,
2157			"[%s] Failed to register memcpy-only channels\n",
2158 __func__);
2159 goto failure2;
2160 }
2161
2162 d40_chan_init(base, &base->dma_both, base->phy_chans,
2163 0, num_reserved_chans);
2164
2165 dma_cap_zero(base->dma_both.cap_mask);
2166 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2167 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2168
2169 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2170 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2171 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2172 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2173 base->dma_both.device_tx_status = d40_tx_status;
2174 base->dma_both.device_issue_pending = d40_issue_pending;
2175 base->dma_both.device_control = d40_control;
2176 base->dma_both.dev = base->dev;
2177 base->dma_both.copy_align = 2;
2178 err = dma_async_device_register(&base->dma_both);
2179
2180 if (err) {
2181 dev_err(base->dev,
2182 "[%s] Failed to register logical and physical capable channels\n",
2183 __func__);
2184 goto failure3;
2185 }
2186 return 0;
2187failure3:
2188 dma_async_device_unregister(&base->dma_memcpy);
2189failure2:
2190 dma_async_device_unregister(&base->dma_slave);
2191failure1:
2192 return err;
2193}
2194
2195/* Hardware detection and initialization functions. */
2196
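/*
 * Read the PRSME/PRSMO security registers and mark security-only channels
 * as permanently allocated; all other channels are marked free. Returns
 * the number of physical channels available to this driver.
 */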
2197static int __init d40_phy_res_init(struct d40_base *base)
2198{
2199 int i;
2200 int num_phy_chans_avail = 0;
2201 u32 val[2];
2202 int odd_even_bit = -2;
2203
2204 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2205 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2206
2207 for (i = 0; i < base->num_phy_chans; i++) {
2208 base->phy_res[i].num = i;
2209 odd_even_bit += 2 * ((i % 2) == 0);
2210 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2211			/* Mark security-only channels as occupied */
2212 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2213 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2214 } else {
2215 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2216 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2217 num_phy_chans_avail++;
2218 }
2219 spin_lock_init(&base->phy_res[i].lock);
2220 }
2221 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2222 num_phy_chans_avail, base->num_phy_chans);
2223
2224 /* Verify settings extended vs standard */
2225 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2226
2227 for (i = 0; i < base->num_phy_chans; i++) {
2228
2229 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2230 (val[0] & 0x3) != 1)
2231 dev_info(base->dev,
2232 "[%s] INFO: channel %d is misconfigured (%d)\n",
2233 __func__, i, val[0] & 0x3);
2234
2235 val[0] = val[0] >> 2;
2236 }
2237
2238 return num_phy_chans_avail;
2239}
2240
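/*
 * Detect the DMAC: verify the peripheral and PCell ID registers, read the
 * number of physical channels from ICFG, and allocate and populate the
 * struct d40_base that holds the driver state. Returns NULL on failure.
 */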
2241static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2242{
2243 static const struct d40_reg_val dma_id_regs[] = {
2244 /* Peripheral Id */
2245 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2246 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2247 /*
2248 * D40_DREG_PERIPHID2 Depends on HW revision:
2249 * MOP500/HREF ED has 0x0008,
2250 * ? has 0x0018,
2251 * HREF V1 has 0x0028
2252 */
2253 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2254
2255 /* PCell Id */
2256 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2257 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2258 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2259 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2260 };
2261 struct stedma40_platform_data *plat_data;
2262 struct clk *clk = NULL;
2263 void __iomem *virtbase = NULL;
2264 struct resource *res = NULL;
2265 struct d40_base *base = NULL;
2266 int num_log_chans = 0;
2267 int num_phy_chans;
2268 int i;
2269
2270 clk = clk_get(&pdev->dev, NULL);
2271
2272 if (IS_ERR(clk)) {
2273 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2274 __func__);
2275 goto failure;
2276 }
2277
2278 clk_enable(clk);
2279
2280 /* Get IO for DMAC base address */
2281 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2282 if (!res)
2283 goto failure;
2284
2285 if (request_mem_region(res->start, resource_size(res),
2286 D40_NAME " I/O base") == NULL)
2287 goto failure;
2288
2289 virtbase = ioremap(res->start, resource_size(res));
2290 if (!virtbase)
2291 goto failure;
2292
2293 /* HW version check */
2294 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2295 if (dma_id_regs[i].val !=
2296 readl(virtbase + dma_id_regs[i].reg)) {
2297 dev_err(&pdev->dev,
2298 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2299 __func__,
2300 dma_id_regs[i].val,
2301 dma_id_regs[i].reg,
2302 readl(virtbase + dma_id_regs[i].reg));
2303 goto failure;
2304 }
2305 }
2306
2307 i = readl(virtbase + D40_DREG_PERIPHID2);
2308
2309 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2310 dev_err(&pdev->dev,
2311 "[%s] Unknown designer! Got %x wanted %x\n",
2312 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2313 goto failure;
2314 }
2315
2316 /* The number of physical channels on this HW */
2317 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2318
2319 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2320 (i >> 4) & 0xf, res->start);
2321
2322 plat_data = pdev->dev.platform_data;
2323
2324 /* Count the number of logical channels in use */
2325 for (i = 0; i < plat_data->dev_len; i++)
2326 if (plat_data->dev_rx[i] != 0)
2327 num_log_chans++;
2328
2329 for (i = 0; i < plat_data->dev_len; i++)
2330 if (plat_data->dev_tx[i] != 0)
2331 num_log_chans++;
2332
2333 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2334 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2335 sizeof(struct d40_chan), GFP_KERNEL);
2336
2337 if (base == NULL) {
2338 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2339 goto failure;
2340 }
2341
2342 base->clk = clk;
2343 base->num_phy_chans = num_phy_chans;
2344 base->num_log_chans = num_log_chans;
2345 base->phy_start = res->start;
2346 base->phy_size = resource_size(res);
2347 base->virtbase = virtbase;
2348 base->plat_data = plat_data;
2349 base->dev = &pdev->dev;
2350 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2351 base->log_chans = &base->phy_chans[num_phy_chans];
2352
2353 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2354 GFP_KERNEL);
2355 if (!base->phy_res)
2356 goto failure;
2357
2358 base->lookup_phy_chans = kzalloc(num_phy_chans *
2359 sizeof(struct d40_chan *),
2360 GFP_KERNEL);
2361 if (!base->lookup_phy_chans)
2362 goto failure;
2363
2364 if (num_log_chans + plat_data->memcpy_len) {
2365 /*
2366		 * The max number of logical channels is the number of event
2367		 * lines for all src and dst devices combined.
2368 */
2369 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2370 sizeof(struct d40_chan *),
2371 GFP_KERNEL);
2372 if (!base->lookup_log_chans)
2373 goto failure;
2374 }
2375 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2376 GFP_KERNEL);
2377 if (!base->lcla_pool.alloc_map)
2378 goto failure;
2379
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002380 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2381 0, SLAB_HWCACHE_ALIGN,
2382 NULL);
2383 if (base->desc_slab == NULL)
2384 goto failure;
2385
Linus Walleij8d318a52010-03-30 15:33:42 +02002386 return base;
2387
2388failure:
2389 if (clk) {
2390 clk_disable(clk);
2391 clk_put(clk);
2392 }
2393 if (virtbase)
2394 iounmap(virtbase);
2395 if (res)
2396 release_mem_region(res->start,
2397 resource_size(res));
2400
2401 if (base) {
2402 kfree(base->lcla_pool.alloc_map);
2403 kfree(base->lookup_log_chans);
2404 kfree(base->lookup_phy_chans);
2405 kfree(base->phy_res);
2406 kfree(base);
2407 }
2408
2409 return NULL;
2410}
2411
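/*
 * Write the default hardware setup: enable the clocks for all parts of
 * the block, unmask and clear all logical channel interrupts, and put
 * every channel not reserved for secure use into physical mode with its
 * interrupt enabled and cleared.
 */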
2412static void __init d40_hw_init(struct d40_base *base)
2413{
2414
2415 static const struct d40_reg_val dma_init_reg[] = {
2416 /* Clock every part of the DMA block from start */
2417 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2418
2419 /* Interrupts on all logical channels */
2420 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2421 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2422 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2423 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2424 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2425 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2426 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2427 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2428 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2429 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2430 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2431 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2432 };
2433 int i;
2434 u32 prmseo[2] = {0, 0};
2435 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2436 u32 pcmis = 0;
2437 u32 pcicr = 0;
2438
2439 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2440 writel(dma_init_reg[i].val,
2441 base->virtbase + dma_init_reg[i].reg);
2442
2443 /* Configure all our dma channels to default settings */
2444 for (i = 0; i < base->num_phy_chans; i++) {
2445
2446 activeo[i % 2] = activeo[i % 2] << 2;
2447
2448 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2449 == D40_ALLOC_PHY) {
2450 activeo[i % 2] |= 3;
2451 continue;
2452 }
2453
2454		/* Enable the interrupt for this physical channel */
2455 pcmis = (pcmis << 1) | 1;
2456
2457		/* Mark this physical channel's interrupt to be cleared */
2458 pcicr = (pcicr << 1) | 1;
2459
2460 /* Set channel to physical mode */
2461 prmseo[i % 2] = prmseo[i % 2] << 2;
2462 prmseo[i % 2] |= 1;
2463
2464 }
2465
2466 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2467 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2468 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2469 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2470
2471 /* Write which interrupt to enable */
2472 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2473
2474 /* Write which interrupt to clear */
2475 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2476
2477}
2478
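/*
 * Probe: detect the hardware, initialize the physical channel resources,
 * map the LCPA and LCLA memory regions, request the interrupt, register
 * the dmaengine devices and finally write the default hardware setup.
 */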
2479static int __init d40_probe(struct platform_device *pdev)
2480{
2481 int err;
2482 int ret = -ENOENT;
2483 struct d40_base *base;
2484 struct resource *res = NULL;
2485 int num_reserved_chans;
2486 u32 val;
2487
2488 base = d40_hw_detect_init(pdev);
2489
2490 if (!base)
2491 goto failure;
2492
2493 num_reserved_chans = d40_phy_res_init(base);
2494
2495 platform_set_drvdata(pdev, base);
2496
2497 spin_lock_init(&base->interrupt_lock);
2498 spin_lock_init(&base->execmd_lock);
2499
2500 /* Get IO for logical channel parameter address */
2501 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2502 if (!res) {
2503 ret = -ENOENT;
2504 dev_err(&pdev->dev,
2505 "[%s] No \"lcpa\" memory resource\n",
2506 __func__);
2507 goto failure;
2508 }
2509 base->lcpa_size = resource_size(res);
2510 base->phy_lcpa = res->start;
2511
2512 if (request_mem_region(res->start, resource_size(res),
2513 D40_NAME " I/O lcpa") == NULL) {
2514 ret = -EBUSY;
2515 dev_err(&pdev->dev,
2516 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2517 __func__, res->start, res->end);
2518 goto failure;
2519 }
2520
2521	/* The LCPA (logical channel parameter area) is placed in ESRAM. */
2522 val = readl(base->virtbase + D40_DREG_LCPA);
2523 if (res->start != val && val != 0) {
2524 dev_warn(&pdev->dev,
2525 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2526 __func__, val, res->start);
2527 } else
2528 writel(res->start, base->virtbase + D40_DREG_LCPA);
2529
2530 base->lcpa_base = ioremap(res->start, resource_size(res));
2531 if (!base->lcpa_base) {
2532 ret = -ENOMEM;
2533 dev_err(&pdev->dev,
2534 "[%s] Failed to ioremap LCPA region\n",
2535 __func__);
2536 goto failure;
2537 }
2538 /* Get IO for logical channel link address */
2539 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2540 if (!res) {
2541 ret = -ENOENT;
2542 dev_err(&pdev->dev,
2543 "[%s] No \"lcla\" resource defined\n",
2544 __func__);
2545 goto failure;
2546 }
2547
2548 base->lcla_pool.base_size = resource_size(res);
2549 base->lcla_pool.phy = res->start;
2550
2551 if (request_mem_region(res->start, resource_size(res),
2552 D40_NAME " I/O lcla") == NULL) {
2553 ret = -EBUSY;
2554 dev_err(&pdev->dev,
2555 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2556 __func__, res->start, res->end);
2557 goto failure;
2558 }
2559 val = readl(base->virtbase + D40_DREG_LCLA);
2560 if (res->start != val && val != 0) {
2561 dev_warn(&pdev->dev,
2562 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2563 __func__, val, res->start);
2564 } else
2565 writel(res->start, base->virtbase + D40_DREG_LCLA);
2566
2567 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2568 if (!base->lcla_pool.base) {
2569 ret = -ENOMEM;
2570 dev_err(&pdev->dev,
2571 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2572 __func__, res->start, res->end);
2573 goto failure;
2574 }
2575
2576 spin_lock_init(&base->lcla_pool.lock);
2577
2578 base->lcla_pool.num_blocks = base->num_phy_chans;
2579
2580 base->irq = platform_get_irq(pdev, 0);
2581
2582 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2583
2584 if (ret) {
2585 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2586 goto failure;
2587 }
2588
2589 err = d40_dmaengine_init(base, num_reserved_chans);
2590 if (err)
2591 goto failure;
2592
2593 d40_hw_init(base);
2594
2595 dev_info(base->dev, "initialized\n");
2596 return 0;
2597
2598failure:
2599 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002600 if (base->desc_slab)
2601 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002602 if (base->virtbase)
2603 iounmap(base->virtbase);
2604 if (base->lcla_pool.phy)
2605 release_mem_region(base->lcla_pool.phy,
2606 base->lcla_pool.base_size);
2607 if (base->phy_lcpa)
2608 release_mem_region(base->phy_lcpa,
2609 base->lcpa_size);
2610 if (base->phy_start)
2611 release_mem_region(base->phy_start,
2612 base->phy_size);
2613 if (base->clk) {
2614 clk_disable(base->clk);
2615 clk_put(base->clk);
2616 }
2617
2618 kfree(base->lcla_pool.alloc_map);
2619 kfree(base->lookup_log_chans);
2620 kfree(base->lookup_phy_chans);
2621 kfree(base->phy_res);
2622 kfree(base);
2623 }
2624
2625 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2626 return ret;
2627}
2628
2629static struct platform_driver d40_driver = {
2630 .driver = {
2631 .owner = THIS_MODULE,
2632 .name = D40_NAME,
2633 },
2634};
2635
2636int __init stedma40_init(void)
2637{
2638 return platform_driver_probe(&d40_driver, d40_probe);
2639}
2640arch_initcall(stedma40_init);