blob: 1d21fbd419d182f7e64ee4c3c431165d85401fff [file] [log] [blame]
Linus Walleij8d318a52010-03-30 15:33:42 +02001/*
2 * driver/dma/ste_dma40.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
Linus Walleij508849a2010-06-20 21:26:07 +000033/* Hardware requirement on LCLA alignment */
34#define LCLA_ALIGNMENT 0x40000
35/* Attempts before giving up to trying to get pages that are aligned */
36#define MAX_LCLA_ALLOC_ATTEMPTS 256
37
38/* Bit markings for allocation map */
Linus Walleij8d318a52010-03-30 15:33:42 +020039#define D40_ALLOC_FREE (1 << 31)
40#define D40_ALLOC_PHY (1 << 30)
41#define D40_ALLOC_LOG_FREE 0
42
Linus Walleij8d318a52010-03-30 15:33:42 +020043/* Hardware designer of the block */
44#define D40_PERIPHID2_DESIGNER 0x8
45
46/**
47 * enum 40_command - The different commands and/or statuses.
48 *
49 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
50 * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN.
51 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
52 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
53 */
54enum d40_command {
55 D40_DMA_STOP = 0,
56 D40_DMA_RUN = 1,
57 D40_DMA_SUSPEND_REQ = 2,
58 D40_DMA_SUSPENDED = 3
59};
60
61/**
62 * struct d40_lli_pool - Structure for keeping LLIs in memory
63 *
64 * @base: Pointer to memory area when the pre_alloc_lli's are not large
65 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
66 * pre_alloc_lli is used.
67 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
68 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
69 * one buffer to one buffer.
70 */
71struct d40_lli_pool {
72 void *base;
Linus Walleij508849a2010-06-20 21:26:07 +000073 int size;
Linus Walleij8d318a52010-03-30 15:33:42 +020074 /* Space for dst and src, plus an extra for padding */
Linus Walleij508849a2010-06-20 21:26:07 +000075 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
Linus Walleij8d318a52010-03-30 15:33:42 +020076};
77
78/**
79 * struct d40_desc - A descriptor is one DMA job.
80 *
81 * @lli_phy: LLI settings for physical channel. Both src and dst=
82 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
83 * lli_len equals one.
84 * @lli_log: Same as above but for logical channels.
85 * @lli_pool: The pool with two entries pre-allocated.
Per Friden941b77a2010-06-20 21:24:45 +000086 * @lli_len: Number of llis of current descriptor.
87 * @lli_count: Number of transfered llis.
88 * @lli_tx_len: Max number of LLIs per transfer, there can be
89 * many transfer for one descriptor.
Linus Walleij8d318a52010-03-30 15:33:42 +020090 * @txd: DMA engine struct. Used for among other things for communication
91 * during a transfer.
92 * @node: List entry.
93 * @dir: The transfer direction of this job.
94 * @is_in_client_list: true if the client owns this descriptor.
95 *
96 * This descriptor is used for both logical and physical transfers.
97 */
98
99struct d40_desc {
100 /* LLI physical */
101 struct d40_phy_lli_bidir lli_phy;
102 /* LLI logical */
103 struct d40_log_lli_bidir lli_log;
104
105 struct d40_lli_pool lli_pool;
Per Friden941b77a2010-06-20 21:24:45 +0000106 int lli_len;
107 int lli_count;
108 u32 lli_tx_len;
Linus Walleij8d318a52010-03-30 15:33:42 +0200109
110 struct dma_async_tx_descriptor txd;
111 struct list_head node;
112
113 enum dma_data_direction dir;
114 bool is_in_client_list;
115};
116
117/**
118 * struct d40_lcla_pool - LCLA pool settings and data.
119 *
Linus Walleij508849a2010-06-20 21:26:07 +0000120 * @base: The virtual address of LCLA. 18 bit aligned.
121 * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
122 * This pointer is only there for clean-up on error.
123 * @pages: The number of pages needed for all physical channels.
124 * Only used later for clean-up on error
Linus Walleij8d318a52010-03-30 15:33:42 +0200125 * @lock: Lock to protect the content in this struct.
Linus Walleij508849a2010-06-20 21:26:07 +0000126 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
Linus Walleij8d318a52010-03-30 15:33:42 +0200127 * @num_blocks: The number of entries of alloc_map. Equals to the
128 * number of physical channels.
129 */
130struct d40_lcla_pool {
131 void *base;
Linus Walleij508849a2010-06-20 21:26:07 +0000132 void *base_unaligned;
133 int pages;
Linus Walleij8d318a52010-03-30 15:33:42 +0200134 spinlock_t lock;
135 u32 *alloc_map;
136 int num_blocks;
137};
138
139/**
140 * struct d40_phy_res - struct for handling eventlines mapped to physical
141 * channels.
142 *
143 * @lock: A lock protection this entity.
144 * @num: The physical channel number of this entity.
145 * @allocated_src: Bit mapped to show which src event line's are mapped to
146 * this physical channel. Can also be free or physically allocated.
147 * @allocated_dst: Same as for src but is dst.
148 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
149 * event line number. Both allocated_src and allocated_dst can not be
150 * allocated to a physical channel, since the interrupt handler has then
151 * no way of figure out which one the interrupt belongs to.
152 */
153struct d40_phy_res {
154 spinlock_t lock;
155 int num;
156 u32 allocated_src;
157 u32 allocated_dst;
158};
159
160struct d40_base;
161
162/**
163 * struct d40_chan - Struct that describes a channel.
164 *
165 * @lock: A spinlock to protect this struct.
166 * @log_num: The logical number, if any of this channel.
167 * @completed: Starts with 1, after first interrupt it is set to dma engine's
168 * current cookie.
169 * @pending_tx: The number of pending transfers. Used between interrupt handler
170 * and tasklet.
171 * @busy: Set to true when transfer is ongoing on this channel.
Jonas Aaberg2a614342010-06-20 21:25:24 +0000172 * @phy_chan: Pointer to physical channel which this instance runs on. If this
173 * point is NULL, then the channel is not allocated.
Linus Walleij8d318a52010-03-30 15:33:42 +0200174 * @chan: DMA engine handle.
175 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
176 * transfer and call client callback.
177 * @client: Cliented owned descriptor list.
178 * @active: Active descriptor.
179 * @queue: Queued jobs.
Linus Walleij8d318a52010-03-30 15:33:42 +0200180 * @dma_cfg: The client configuration of this dma channel.
181 * @base: Pointer to the device instance struct.
182 * @src_def_cfg: Default cfg register setting for src.
183 * @dst_def_cfg: Default cfg register setting for dst.
184 * @log_def: Default logical channel settings.
185 * @lcla: Space for one dst src pair for logical channel transfers.
186 * @lcpa: Pointer to dst and src lcpa settings.
187 *
188 * This struct can either "be" a logical or a physical channel.
189 */
190struct d40_chan {
191 spinlock_t lock;
192 int log_num;
193 /* ID of the most recent completed transfer */
194 int completed;
195 int pending_tx;
196 bool busy;
197 struct d40_phy_res *phy_chan;
198 struct dma_chan chan;
199 struct tasklet_struct tasklet;
200 struct list_head client;
201 struct list_head active;
202 struct list_head queue;
Linus Walleij8d318a52010-03-30 15:33:42 +0200203 struct stedma40_chan_cfg dma_cfg;
204 struct d40_base *base;
205 /* Default register configurations */
206 u32 src_def_cfg;
207 u32 dst_def_cfg;
208 struct d40_def_lcsp log_def;
209 struct d40_lcla_elem lcla;
210 struct d40_log_lli_full *lcpa;
211};
212
213/**
214 * struct d40_base - The big global struct, one for each probe'd instance.
215 *
216 * @interrupt_lock: Lock used to make sure one interrupt is handle a time.
217 * @execmd_lock: Lock for execute command usage since several channels share
218 * the same physical register.
219 * @dev: The device structure.
220 * @virtbase: The virtual base address of the DMA's register.
221 * @clk: Pointer to the DMA clock structure.
222 * @phy_start: Physical memory start of the DMA registers.
223 * @phy_size: Size of the DMA register map.
224 * @irq: The IRQ number.
225 * @num_phy_chans: The number of physical channels. Read from HW. This
226 * is the number of available channels for this driver, not counting "Secure
227 * mode" allocated physical channels.
228 * @num_log_chans: The number of logical channels. Calculated from
229 * num_phy_chans.
230 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
231 * @dma_slave: dma_device channels that can do only do slave transfers.
232 * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
233 * @phy_chans: Room for all possible physical channels in system.
234 * @log_chans: Room for all possible logical channels in system.
235 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
236 * to log_chans entries.
237 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
238 * to phy_chans entries.
239 * @plat_data: Pointer to provided platform_data which is the driver
240 * configuration.
241 * @phy_res: Vector containing all physical channels.
242 * @lcla_pool: lcla pool settings and data.
243 * @lcpa_base: The virtual mapped address of LCPA.
244 * @phy_lcpa: The physical address of the LCPA.
245 * @lcpa_size: The size of the LCPA area.
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000246 * @desc_slab: cache for descriptors.
Linus Walleij8d318a52010-03-30 15:33:42 +0200247 */
248struct d40_base {
249 spinlock_t interrupt_lock;
250 spinlock_t execmd_lock;
251 struct device *dev;
252 void __iomem *virtbase;
253 struct clk *clk;
254 phys_addr_t phy_start;
255 resource_size_t phy_size;
256 int irq;
257 int num_phy_chans;
258 int num_log_chans;
259 struct dma_device dma_both;
260 struct dma_device dma_slave;
261 struct dma_device dma_memcpy;
262 struct d40_chan *phy_chans;
263 struct d40_chan *log_chans;
264 struct d40_chan **lookup_log_chans;
265 struct d40_chan **lookup_phy_chans;
266 struct stedma40_platform_data *plat_data;
267 /* Physical half channels */
268 struct d40_phy_res *phy_res;
269 struct d40_lcla_pool lcla_pool;
270 void *lcpa_base;
271 dma_addr_t phy_lcpa;
272 resource_size_t lcpa_size;
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000273 struct kmem_cache *desc_slab;
Linus Walleij8d318a52010-03-30 15:33:42 +0200274};
275
276/**
277 * struct d40_interrupt_lookup - lookup table for interrupt handler
278 *
279 * @src: Interrupt mask register.
280 * @clr: Interrupt clear register.
281 * @is_error: true if this is an error interrupt.
282 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
283 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
284 */
285struct d40_interrupt_lookup {
286 u32 src;
287 u32 clr;
288 bool is_error;
289 int offset;
290};
291
292/**
293 * struct d40_reg_val - simple lookup struct
294 *
295 * @reg: The register.
296 * @val: The value that belongs to the register in reg.
297 */
298struct d40_reg_val {
299 unsigned int reg;
300 unsigned int val;
301};
302
303static int d40_pool_lli_alloc(struct d40_desc *d40d,
304 int lli_len, bool is_log)
305{
306 u32 align;
307 void *base;
308
309 if (is_log)
310 align = sizeof(struct d40_log_lli);
311 else
312 align = sizeof(struct d40_phy_lli);
313
314 if (lli_len == 1) {
315 base = d40d->lli_pool.pre_alloc_lli;
316 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
317 d40d->lli_pool.base = NULL;
318 } else {
319 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
320
321 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
322 d40d->lli_pool.base = base;
323
324 if (d40d->lli_pool.base == NULL)
325 return -ENOMEM;
326 }
327
328 if (is_log) {
329 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
330 align);
331 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
332 align);
333 } else {
334 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
335 align);
336 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
337 align);
338
339 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
340 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
341 }
342
343 return 0;
344}
345
346static void d40_pool_lli_free(struct d40_desc *d40d)
347{
348 kfree(d40d->lli_pool.base);
349 d40d->lli_pool.base = NULL;
350 d40d->lli_pool.size = 0;
351 d40d->lli_log.src = NULL;
352 d40d->lli_log.dst = NULL;
353 d40d->lli_phy.src = NULL;
354 d40d->lli_phy.dst = NULL;
355 d40d->lli_phy.src_addr = 0;
356 d40d->lli_phy.dst_addr = 0;
357}
358
359static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
360 struct d40_desc *desc)
361{
362 dma_cookie_t cookie = d40c->chan.cookie;
363
364 if (++cookie < 0)
365 cookie = 1;
366
367 d40c->chan.cookie = cookie;
368 desc->txd.cookie = cookie;
369
370 return cookie;
371}
372
Linus Walleij8d318a52010-03-30 15:33:42 +0200373static void d40_desc_remove(struct d40_desc *d40d)
374{
375 list_del(&d40d->node);
376}
377
378static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
379{
Linus Walleij8d318a52010-03-30 15:33:42 +0200380 struct d40_desc *d;
381 struct d40_desc *_d;
382
383 if (!list_empty(&d40c->client)) {
384 list_for_each_entry_safe(d, _d, &d40c->client, node)
385 if (async_tx_test_ack(&d->txd)) {
386 d40_pool_lli_free(d);
387 d40_desc_remove(d);
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000388 break;
Linus Walleij8d318a52010-03-30 15:33:42 +0200389 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200390 } else {
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000391 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
392 if (d != NULL) {
393 memset(d, 0, sizeof(struct d40_desc));
394 INIT_LIST_HEAD(&d->node);
395 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200396 }
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000397 return d;
Linus Walleij8d318a52010-03-30 15:33:42 +0200398}
399
400static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
401{
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000402 kmem_cache_free(d40c->base->desc_slab, d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +0200403}
404
405static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
406{
407 list_add_tail(&desc->node, &d40c->active);
408}
409
410static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
411{
412 struct d40_desc *d;
413
414 if (list_empty(&d40c->active))
415 return NULL;
416
417 d = list_first_entry(&d40c->active,
418 struct d40_desc,
419 node);
420 return d;
421}
422
423static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
424{
425 list_add_tail(&desc->node, &d40c->queue);
426}
427
428static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
429{
430 struct d40_desc *d;
431
432 if (list_empty(&d40c->queue))
433 return NULL;
434
435 d = list_first_entry(&d40c->queue,
436 struct d40_desc,
437 node);
438 return d;
439}
440
441/* Support functions for logical channels */
442
Linus Walleij508849a2010-06-20 21:26:07 +0000443static int d40_lcla_id_get(struct d40_chan *d40c)
Linus Walleij8d318a52010-03-30 15:33:42 +0200444{
445 int src_id = 0;
446 int dst_id = 0;
447 struct d40_log_lli *lcla_lidx_base =
Linus Walleij508849a2010-06-20 21:26:07 +0000448 d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
Linus Walleij8d318a52010-03-30 15:33:42 +0200449 int i;
450 int lli_per_log = d40c->base->plat_data->llis_per_log;
Jonas Aaberg2292b882010-06-20 21:25:39 +0000451 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +0200452
453 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
454 return 0;
455
Linus Walleij508849a2010-06-20 21:26:07 +0000456 if (d40c->base->lcla_pool.num_blocks > 32)
Linus Walleij8d318a52010-03-30 15:33:42 +0200457 return -EINVAL;
458
Linus Walleij508849a2010-06-20 21:26:07 +0000459 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +0200460
Linus Walleij508849a2010-06-20 21:26:07 +0000461 for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
462 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
463 (0x1 << i))) {
464 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
465 (0x1 << i);
Linus Walleij8d318a52010-03-30 15:33:42 +0200466 break;
467 }
468 }
469 src_id = i;
Linus Walleij508849a2010-06-20 21:26:07 +0000470 if (src_id >= d40c->base->lcla_pool.num_blocks)
Linus Walleij8d318a52010-03-30 15:33:42 +0200471 goto err;
472
Linus Walleij508849a2010-06-20 21:26:07 +0000473 for (; i < d40c->base->lcla_pool.num_blocks; i++) {
474 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
475 (0x1 << i))) {
476 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
477 (0x1 << i);
Linus Walleij8d318a52010-03-30 15:33:42 +0200478 break;
479 }
480 }
481
482 dst_id = i;
483 if (dst_id == src_id)
484 goto err;
485
486 d40c->lcla.src_id = src_id;
487 d40c->lcla.dst_id = dst_id;
488 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
489 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
490
Linus Walleij508849a2010-06-20 21:26:07 +0000491 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +0200492 return 0;
493err:
Linus Walleij508849a2010-06-20 21:26:07 +0000494 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +0200495 return -EINVAL;
496}
497
Linus Walleij8d318a52010-03-30 15:33:42 +0200498
499static int d40_channel_execute_command(struct d40_chan *d40c,
500 enum d40_command command)
501{
502 int status, i;
503 void __iomem *active_reg;
504 int ret = 0;
505 unsigned long flags;
Jonas Aaberg1d392a72010-06-20 21:26:01 +0000506 u32 wmask;
Linus Walleij8d318a52010-03-30 15:33:42 +0200507
508 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
509
510 if (d40c->phy_chan->num % 2 == 0)
511 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
512 else
513 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
514
515 if (command == D40_DMA_SUSPEND_REQ) {
516 status = (readl(active_reg) &
517 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
518 D40_CHAN_POS(d40c->phy_chan->num);
519
520 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
521 goto done;
522 }
523
Jonas Aaberg1d392a72010-06-20 21:26:01 +0000524 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
525 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
526 active_reg);
Linus Walleij8d318a52010-03-30 15:33:42 +0200527
528 if (command == D40_DMA_SUSPEND_REQ) {
529
530 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
531 status = (readl(active_reg) &
532 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
533 D40_CHAN_POS(d40c->phy_chan->num);
534
535 cpu_relax();
536 /*
537 * Reduce the number of bus accesses while
538 * waiting for the DMA to suspend.
539 */
540 udelay(3);
541
542 if (status == D40_DMA_STOP ||
543 status == D40_DMA_SUSPENDED)
544 break;
545 }
546
547 if (i == D40_SUSPEND_MAX_IT) {
548 dev_err(&d40c->chan.dev->device,
549 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
550 __func__, d40c->phy_chan->num, d40c->log_num,
551 status);
552 dump_stack();
553 ret = -EBUSY;
554 }
555
556 }
557done:
558 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
559 return ret;
560}
561
562static void d40_term_all(struct d40_chan *d40c)
563{
564 struct d40_desc *d40d;
Linus Walleij508849a2010-06-20 21:26:07 +0000565 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +0200566
567 /* Release active descriptors */
568 while ((d40d = d40_first_active_get(d40c))) {
569 d40_desc_remove(d40d);
570
571 /* Return desc to free-list */
572 d40_desc_free(d40c, d40d);
573 }
574
575 /* Release queued descriptors waiting for transfer */
576 while ((d40d = d40_first_queued(d40c))) {
577 d40_desc_remove(d40d);
578
579 /* Return desc to free-list */
580 d40_desc_free(d40c, d40d);
581 }
582
Linus Walleij508849a2010-06-20 21:26:07 +0000583 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
584
585 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
586 (~(0x1 << d40c->lcla.dst_id));
587 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
588 (~(0x1 << d40c->lcla.src_id));
589
590 d40c->lcla.src_id = -1;
591 d40c->lcla.dst_id = -1;
592
593 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +0200594
595 d40c->pending_tx = 0;
596 d40c->busy = false;
597}
598
599static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
600{
601 u32 val;
602 unsigned long flags;
603
Jonas Aaberg0c322692010-06-20 21:25:46 +0000604 /* Notice, that disable requires the physical channel to be stopped */
Linus Walleij8d318a52010-03-30 15:33:42 +0200605 if (do_enable)
606 val = D40_ACTIVATE_EVENTLINE;
607 else
608 val = D40_DEACTIVATE_EVENTLINE;
609
610 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
611
612 /* Enable event line connected to device (or memcpy) */
613 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
614 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
615 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
616
617 writel((val << D40_EVENTLINE_POS(event)) |
618 ~D40_EVENTLINE_MASK(event),
619 d40c->base->virtbase + D40_DREG_PCBASE +
620 d40c->phy_chan->num * D40_DREG_PCDELTA +
621 D40_CHAN_REG_SSLNK);
622 }
623 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
624 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
625
626 writel((val << D40_EVENTLINE_POS(event)) |
627 ~D40_EVENTLINE_MASK(event),
628 d40c->base->virtbase + D40_DREG_PCBASE +
629 d40c->phy_chan->num * D40_DREG_PCDELTA +
630 D40_CHAN_REG_SDLNK);
631 }
632
633 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
634}
635
Jonas Aaberga5ebca42010-05-18 00:41:09 +0200636static u32 d40_chan_has_events(struct d40_chan *d40c)
Linus Walleij8d318a52010-03-30 15:33:42 +0200637{
638 u32 val = 0;
639
640 /* If SSLNK or SDLNK is zero all events are disabled */
641 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
642 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
643 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
644 d40c->phy_chan->num * D40_DREG_PCDELTA +
645 D40_CHAN_REG_SSLNK);
646
647 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
648 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
649 d40c->phy_chan->num * D40_DREG_PCDELTA +
650 D40_CHAN_REG_SDLNK);
Jonas Aaberga5ebca42010-05-18 00:41:09 +0200651 return val;
Linus Walleij8d318a52010-03-30 15:33:42 +0200652}
653
654static void d40_config_enable_lidx(struct d40_chan *d40c)
655{
656 /* Set LIDX for lcla */
657 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
658 D40_SREG_ELEM_LOG_LIDX_MASK,
659 d40c->base->virtbase + D40_DREG_PCBASE +
660 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
661
662 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
663 D40_SREG_ELEM_LOG_LIDX_MASK,
664 d40c->base->virtbase + D40_DREG_PCBASE +
665 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
666}
667
668static int d40_config_write(struct d40_chan *d40c)
669{
670 u32 addr_base;
671 u32 var;
672 int res;
673
674 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
675 if (res)
676 return res;
677
678 /* Odd addresses are even addresses + 4 */
679 addr_base = (d40c->phy_chan->num % 2) * 4;
680 /* Setup channel mode to logical or physical */
681 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
682 D40_CHAN_POS(d40c->phy_chan->num);
683 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
684
685 /* Setup operational mode option register */
686 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
687 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
688
689 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
690
691 if (d40c->log_num != D40_PHY_CHAN) {
692 /* Set default config for CFG reg */
693 writel(d40c->src_def_cfg,
694 d40c->base->virtbase + D40_DREG_PCBASE +
695 d40c->phy_chan->num * D40_DREG_PCDELTA +
696 D40_CHAN_REG_SSCFG);
697 writel(d40c->dst_def_cfg,
698 d40c->base->virtbase + D40_DREG_PCBASE +
699 d40c->phy_chan->num * D40_DREG_PCDELTA +
700 D40_CHAN_REG_SDCFG);
701
702 d40_config_enable_lidx(d40c);
703 }
704 return res;
705}
706
707static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
708{
Linus Walleij8d318a52010-03-30 15:33:42 +0200709 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
710 d40_phy_lli_write(d40c->base->virtbase,
711 d40c->phy_chan->num,
712 d40d->lli_phy.dst,
713 d40d->lli_phy.src);
Linus Walleij8d318a52010-03-30 15:33:42 +0200714 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
Linus Walleij8d318a52010-03-30 15:33:42 +0200715 struct d40_log_lli *src = d40d->lli_log.src;
716 struct d40_log_lli *dst = d40d->lli_log.dst;
Linus Walleij508849a2010-06-20 21:26:07 +0000717 int s;
Linus Walleij8d318a52010-03-30 15:33:42 +0200718
Per Friden941b77a2010-06-20 21:24:45 +0000719 src += d40d->lli_count;
720 dst += d40d->lli_count;
Linus Walleij508849a2010-06-20 21:26:07 +0000721 s = d40_log_lli_write(d40c->lcpa,
722 d40c->lcla.src, d40c->lcla.dst,
723 dst, src,
724 d40c->base->plat_data->llis_per_log);
725
726 /* If s equals to zero, the job is not linked */
727 if (s > 0) {
728 (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
729 s * sizeof(struct d40_log_lli),
730 DMA_TO_DEVICE);
731 (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
732 s * sizeof(struct d40_log_lli),
733 DMA_TO_DEVICE);
734 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200735 }
Per Friden941b77a2010-06-20 21:24:45 +0000736 d40d->lli_count += d40d->lli_tx_len;
Linus Walleij8d318a52010-03-30 15:33:42 +0200737}
738
739static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
740{
741 struct d40_chan *d40c = container_of(tx->chan,
742 struct d40_chan,
743 chan);
744 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
745 unsigned long flags;
746
747 spin_lock_irqsave(&d40c->lock, flags);
748
749 tx->cookie = d40_assign_cookie(d40c, d40d);
750
751 d40_desc_queue(d40c, d40d);
752
753 spin_unlock_irqrestore(&d40c->lock, flags);
754
755 return tx->cookie;
756}
757
758static int d40_start(struct d40_chan *d40c)
759{
Jonas Aaberg0c322692010-06-20 21:25:46 +0000760 if (d40c->log_num != D40_PHY_CHAN)
Linus Walleij8d318a52010-03-30 15:33:42 +0200761 d40_config_set_event(d40c, true);
Linus Walleij8d318a52010-03-30 15:33:42 +0200762
Jonas Aaberg0c322692010-06-20 21:25:46 +0000763 return d40_channel_execute_command(d40c, D40_DMA_RUN);
Linus Walleij8d318a52010-03-30 15:33:42 +0200764}
765
766static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
767{
768 struct d40_desc *d40d;
769 int err;
770
771 /* Start queued jobs, if any */
772 d40d = d40_first_queued(d40c);
773
774 if (d40d != NULL) {
775 d40c->busy = true;
776
777 /* Remove from queue */
778 d40_desc_remove(d40d);
779
780 /* Add to active queue */
781 d40_desc_submit(d40c, d40d);
782
783 /* Initiate DMA job */
784 d40_desc_load(d40c, d40d);
785
786 /* Start dma job */
787 err = d40_start(d40c);
788
789 if (err)
790 return NULL;
791 }
792
793 return d40d;
794}
795
796/* called from interrupt context */
797static void dma_tc_handle(struct d40_chan *d40c)
798{
799 struct d40_desc *d40d;
800
801 if (!d40c->phy_chan)
802 return;
803
804 /* Get first active entry from list */
805 d40d = d40_first_active_get(d40c);
806
807 if (d40d == NULL)
808 return;
809
Per Friden941b77a2010-06-20 21:24:45 +0000810 if (d40d->lli_count < d40d->lli_len) {
Linus Walleij8d318a52010-03-30 15:33:42 +0200811
812 d40_desc_load(d40c, d40d);
813 /* Start dma job */
814 (void) d40_start(d40c);
815 return;
816 }
817
818 if (d40_queue_start(d40c) == NULL)
819 d40c->busy = false;
820
821 d40c->pending_tx++;
822 tasklet_schedule(&d40c->tasklet);
823
824}
825
826static void dma_tasklet(unsigned long data)
827{
828 struct d40_chan *d40c = (struct d40_chan *) data;
829 struct d40_desc *d40d_fin;
830 unsigned long flags;
831 dma_async_tx_callback callback;
832 void *callback_param;
833
834 spin_lock_irqsave(&d40c->lock, flags);
835
836 /* Get first active entry from list */
837 d40d_fin = d40_first_active_get(d40c);
838
839 if (d40d_fin == NULL)
840 goto err;
841
842 d40c->completed = d40d_fin->txd.cookie;
843
844 /*
845 * If terminating a channel pending_tx is set to zero.
846 * This prevents any finished active jobs to return to the client.
847 */
848 if (d40c->pending_tx == 0) {
849 spin_unlock_irqrestore(&d40c->lock, flags);
850 return;
851 }
852
853 /* Callback to client */
854 callback = d40d_fin->txd.callback;
855 callback_param = d40d_fin->txd.callback_param;
856
857 if (async_tx_test_ack(&d40d_fin->txd)) {
858 d40_pool_lli_free(d40d_fin);
859 d40_desc_remove(d40d_fin);
860 /* Return desc to free-list */
861 d40_desc_free(d40c, d40d_fin);
862 } else {
Linus Walleij8d318a52010-03-30 15:33:42 +0200863 if (!d40d_fin->is_in_client_list) {
864 d40_desc_remove(d40d_fin);
865 list_add_tail(&d40d_fin->node, &d40c->client);
866 d40d_fin->is_in_client_list = true;
867 }
868 }
869
870 d40c->pending_tx--;
871
872 if (d40c->pending_tx)
873 tasklet_schedule(&d40c->tasklet);
874
875 spin_unlock_irqrestore(&d40c->lock, flags);
876
877 if (callback)
878 callback(callback_param);
879
880 return;
881
882 err:
883 /* Rescue manouver if receiving double interrupts */
884 if (d40c->pending_tx > 0)
885 d40c->pending_tx--;
886 spin_unlock_irqrestore(&d40c->lock, flags);
887}
888
889static irqreturn_t d40_handle_interrupt(int irq, void *data)
890{
891 static const struct d40_interrupt_lookup il[] = {
892 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
893 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
894 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
895 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
896 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
897 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
898 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
899 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
900 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
901 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
902 };
903
904 int i;
905 u32 regs[ARRAY_SIZE(il)];
906 u32 tmp;
907 u32 idx;
908 u32 row;
909 long chan = -1;
910 struct d40_chan *d40c;
911 unsigned long flags;
912 struct d40_base *base = data;
913
914 spin_lock_irqsave(&base->interrupt_lock, flags);
915
916 /* Read interrupt status of both logical and physical channels */
917 for (i = 0; i < ARRAY_SIZE(il); i++)
918 regs[i] = readl(base->virtbase + il[i].src);
919
920 for (;;) {
921
922 chan = find_next_bit((unsigned long *)regs,
923 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
924
925 /* No more set bits found? */
926 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
927 break;
928
929 row = chan / BITS_PER_LONG;
930 idx = chan & (BITS_PER_LONG - 1);
931
932 /* ACK interrupt */
933 tmp = readl(base->virtbase + il[row].clr);
934 tmp |= 1 << idx;
935 writel(tmp, base->virtbase + il[row].clr);
936
937 if (il[row].offset == D40_PHY_CHAN)
938 d40c = base->lookup_phy_chans[idx];
939 else
940 d40c = base->lookup_log_chans[il[row].offset + idx];
941 spin_lock(&d40c->lock);
942
943 if (!il[row].is_error)
944 dma_tc_handle(d40c);
945 else
Linus Walleij508849a2010-06-20 21:26:07 +0000946 dev_err(base->dev,
947 "[%s] IRQ chan: %ld offset %d idx %d\n",
Linus Walleij8d318a52010-03-30 15:33:42 +0200948 __func__, chan, il[row].offset, idx);
949
950 spin_unlock(&d40c->lock);
951 }
952
953 spin_unlock_irqrestore(&base->interrupt_lock, flags);
954
955 return IRQ_HANDLED;
956}
957
958
959static int d40_validate_conf(struct d40_chan *d40c,
960 struct stedma40_chan_cfg *conf)
961{
962 int res = 0;
963 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
964 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
965 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
966 == STEDMA40_CHANNEL_IN_LOG_MODE;
967
968 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
969 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
970 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
971 __func__);
972 res = -EINVAL;
973 }
974
975 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
976 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
977 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
978 __func__);
979 res = -EINVAL;
980 }
981
982 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
983 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
984 dev_err(&d40c->chan.dev->device,
985 "[%s] No event line\n", __func__);
986 res = -EINVAL;
987 }
988
989 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
990 (src_event_group != dst_event_group)) {
991 dev_err(&d40c->chan.dev->device,
992 "[%s] Invalid event group\n", __func__);
993 res = -EINVAL;
994 }
995
996 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
997 /*
998 * DMAC HW supports it. Will be added to this driver,
999 * in case any dma client requires it.
1000 */
1001 dev_err(&d40c->chan.dev->device,
1002 "[%s] periph to periph not supported\n",
1003 __func__);
1004 res = -EINVAL;
1005 }
1006
1007 return res;
1008}
1009
1010static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001011 int log_event_line, bool is_log)
Linus Walleij8d318a52010-03-30 15:33:42 +02001012{
1013 unsigned long flags;
1014 spin_lock_irqsave(&phy->lock, flags);
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001015 if (!is_log) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001016 /* Physical interrupts are masked per physical full channel */
1017 if (phy->allocated_src == D40_ALLOC_FREE &&
1018 phy->allocated_dst == D40_ALLOC_FREE) {
1019 phy->allocated_dst = D40_ALLOC_PHY;
1020 phy->allocated_src = D40_ALLOC_PHY;
1021 goto found;
1022 } else
1023 goto not_found;
1024 }
1025
1026 /* Logical channel */
1027 if (is_src) {
1028 if (phy->allocated_src == D40_ALLOC_PHY)
1029 goto not_found;
1030
1031 if (phy->allocated_src == D40_ALLOC_FREE)
1032 phy->allocated_src = D40_ALLOC_LOG_FREE;
1033
1034 if (!(phy->allocated_src & (1 << log_event_line))) {
1035 phy->allocated_src |= 1 << log_event_line;
1036 goto found;
1037 } else
1038 goto not_found;
1039 } else {
1040 if (phy->allocated_dst == D40_ALLOC_PHY)
1041 goto not_found;
1042
1043 if (phy->allocated_dst == D40_ALLOC_FREE)
1044 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1045
1046 if (!(phy->allocated_dst & (1 << log_event_line))) {
1047 phy->allocated_dst |= 1 << log_event_line;
1048 goto found;
1049 } else
1050 goto not_found;
1051 }
1052
1053not_found:
1054 spin_unlock_irqrestore(&phy->lock, flags);
1055 return false;
1056found:
1057 spin_unlock_irqrestore(&phy->lock, flags);
1058 return true;
1059}
1060
1061static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1062 int log_event_line)
1063{
1064 unsigned long flags;
1065 bool is_free = false;
1066
1067 spin_lock_irqsave(&phy->lock, flags);
1068 if (!log_event_line) {
1069 /* Physical interrupts are masked per physical full channel */
1070 phy->allocated_dst = D40_ALLOC_FREE;
1071 phy->allocated_src = D40_ALLOC_FREE;
1072 is_free = true;
1073 goto out;
1074 }
1075
1076 /* Logical channel */
1077 if (is_src) {
1078 phy->allocated_src &= ~(1 << log_event_line);
1079 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1080 phy->allocated_src = D40_ALLOC_FREE;
1081 } else {
1082 phy->allocated_dst &= ~(1 << log_event_line);
1083 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1084 phy->allocated_dst = D40_ALLOC_FREE;
1085 }
1086
1087 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1088 D40_ALLOC_FREE);
1089
1090out:
1091 spin_unlock_irqrestore(&phy->lock, flags);
1092
1093 return is_free;
1094}
1095
1096static int d40_allocate_channel(struct d40_chan *d40c)
1097{
1098 int dev_type;
1099 int event_group;
1100 int event_line;
1101 struct d40_phy_res *phys;
1102 int i;
1103 int j;
1104 int log_num;
1105 bool is_src;
Linus Walleij508849a2010-06-20 21:26:07 +00001106 bool is_log = (d40c->dma_cfg.channel_type &
1107 STEDMA40_CHANNEL_IN_OPER_MODE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001108 == STEDMA40_CHANNEL_IN_LOG_MODE;
1109
1110
1111 phys = d40c->base->phy_res;
1112
1113 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1114 dev_type = d40c->dma_cfg.src_dev_type;
1115 log_num = 2 * dev_type;
1116 is_src = true;
1117 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1118 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1119 /* dst event lines are used for logical memcpy */
1120 dev_type = d40c->dma_cfg.dst_dev_type;
1121 log_num = 2 * dev_type + 1;
1122 is_src = false;
1123 } else
1124 return -EINVAL;
1125
1126 event_group = D40_TYPE_TO_GROUP(dev_type);
1127 event_line = D40_TYPE_TO_EVENT(dev_type);
1128
1129 if (!is_log) {
1130 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1131 /* Find physical half channel */
1132 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1133
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001134 if (d40_alloc_mask_set(&phys[i], is_src,
1135 0, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001136 goto found_phy;
1137 }
1138 } else
1139 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1140 int phy_num = j + event_group * 2;
1141 for (i = phy_num; i < phy_num + 2; i++) {
Linus Walleij508849a2010-06-20 21:26:07 +00001142 if (d40_alloc_mask_set(&phys[i],
1143 is_src,
1144 0,
1145 is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001146 goto found_phy;
1147 }
1148 }
1149 return -EINVAL;
1150found_phy:
1151 d40c->phy_chan = &phys[i];
1152 d40c->log_num = D40_PHY_CHAN;
1153 goto out;
1154 }
1155 if (dev_type == -1)
1156 return -EINVAL;
1157
1158 /* Find logical channel */
1159 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1160 int phy_num = j + event_group * 2;
1161 /*
1162 * Spread logical channels across all available physical rather
1163 * than pack every logical channel at the first available phy
1164 * channels.
1165 */
1166 if (is_src) {
1167 for (i = phy_num; i < phy_num + 2; i++) {
1168 if (d40_alloc_mask_set(&phys[i], is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001169 event_line, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001170 goto found_log;
1171 }
1172 } else {
1173 for (i = phy_num + 1; i >= phy_num; i--) {
1174 if (d40_alloc_mask_set(&phys[i], is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001175 event_line, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001176 goto found_log;
1177 }
1178 }
1179 }
1180 return -EINVAL;
1181
1182found_log:
1183 d40c->phy_chan = &phys[i];
1184 d40c->log_num = log_num;
1185out:
1186
1187 if (is_log)
1188 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1189 else
1190 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1191
1192 return 0;
1193
1194}
1195
Linus Walleij8d318a52010-03-30 15:33:42 +02001196static int d40_config_memcpy(struct d40_chan *d40c)
1197{
1198 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1199
1200 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1201 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1202 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1203 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1204 memcpy[d40c->chan.chan_id];
1205
1206 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1207 dma_has_cap(DMA_SLAVE, cap)) {
1208 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1209 } else {
1210 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1211 __func__);
1212 return -EINVAL;
1213 }
1214
1215 return 0;
1216}
1217
1218
1219static int d40_free_dma(struct d40_chan *d40c)
1220{
1221
1222 int res = 0;
Jonas Aabergd181b3a2010-06-20 21:26:38 +00001223 u32 event;
Linus Walleij8d318a52010-03-30 15:33:42 +02001224 struct d40_phy_res *phy = d40c->phy_chan;
1225 bool is_src;
Per Fridena8be8622010-06-20 21:24:59 +00001226 struct d40_desc *d;
1227 struct d40_desc *_d;
1228
Linus Walleij8d318a52010-03-30 15:33:42 +02001229
1230 /* Terminate all queued and active transfers */
1231 d40_term_all(d40c);
1232
Per Fridena8be8622010-06-20 21:24:59 +00001233 /* Release client owned descriptors */
1234 if (!list_empty(&d40c->client))
1235 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1236 d40_pool_lli_free(d);
1237 d40_desc_remove(d);
1238 /* Return desc to free-list */
1239 d40_desc_free(d40c, d);
1240 }
1241
Linus Walleij8d318a52010-03-30 15:33:42 +02001242 if (phy == NULL) {
1243 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1244 __func__);
1245 return -EINVAL;
1246 }
1247
1248 if (phy->allocated_src == D40_ALLOC_FREE &&
1249 phy->allocated_dst == D40_ALLOC_FREE) {
1250 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1251 __func__);
1252 return -EINVAL;
1253 }
1254
Linus Walleij8d318a52010-03-30 15:33:42 +02001255 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1256 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1257 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
Linus Walleij8d318a52010-03-30 15:33:42 +02001258 is_src = false;
1259 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1260 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
Linus Walleij8d318a52010-03-30 15:33:42 +02001261 is_src = true;
1262 } else {
1263 dev_err(&d40c->chan.dev->device,
1264 "[%s] Unknown direction\n", __func__);
1265 return -EINVAL;
1266 }
1267
Jonas Aabergd181b3a2010-06-20 21:26:38 +00001268 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1269 if (res) {
1270 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
1271 __func__);
1272 return res;
1273 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001274
Jonas Aabergd181b3a2010-06-20 21:26:38 +00001275 if (d40c->log_num != D40_PHY_CHAN) {
1276 /* Release logical channel, deactivate the event line */
1277
1278 d40_config_set_event(d40c, false);
Linus Walleij8d318a52010-03-30 15:33:42 +02001279 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1280
1281 /*
1282 * Check if there are more logical allocation
1283 * on this phy channel.
1284 */
1285 if (!d40_alloc_mask_free(phy, is_src, event)) {
1286 /* Resume the other logical channels if any */
1287 if (d40_chan_has_events(d40c)) {
1288 res = d40_channel_execute_command(d40c,
1289 D40_DMA_RUN);
1290 if (res) {
1291 dev_err(&d40c->chan.dev->device,
1292 "[%s] Executing RUN command\n",
1293 __func__);
1294 return res;
1295 }
1296 }
1297 return 0;
1298 }
Jonas Aabergd181b3a2010-06-20 21:26:38 +00001299 } else {
1300 (void) d40_alloc_mask_free(phy, is_src, 0);
1301 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001302
1303 /* Release physical channel */
1304 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1305 if (res) {
1306 dev_err(&d40c->chan.dev->device,
1307 "[%s] Failed to stop channel\n", __func__);
1308 return res;
1309 }
1310 d40c->phy_chan = NULL;
1311 /* Invalidate channel type */
1312 d40c->dma_cfg.channel_type = 0;
1313 d40c->base->lookup_phy_chans[phy->num] = NULL;
1314
1315 return 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02001316}
1317
1318static int d40_pause(struct dma_chan *chan)
1319{
1320 struct d40_chan *d40c =
1321 container_of(chan, struct d40_chan, chan);
1322 int res;
Linus Walleij8d318a52010-03-30 15:33:42 +02001323 unsigned long flags;
1324
1325 spin_lock_irqsave(&d40c->lock, flags);
1326
1327 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1328 if (res == 0) {
1329 if (d40c->log_num != D40_PHY_CHAN) {
1330 d40_config_set_event(d40c, false);
1331 /* Resume the other logical channels if any */
1332 if (d40_chan_has_events(d40c))
1333 res = d40_channel_execute_command(d40c,
1334 D40_DMA_RUN);
1335 }
1336 }
1337
1338 spin_unlock_irqrestore(&d40c->lock, flags);
1339 return res;
1340}
1341
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001342static bool d40_is_paused(struct d40_chan *d40c)
1343{
1344 bool is_paused = false;
1345 unsigned long flags;
1346 void __iomem *active_reg;
1347 u32 status;
1348 u32 event;
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001349
1350 spin_lock_irqsave(&d40c->lock, flags);
1351
1352 if (d40c->log_num == D40_PHY_CHAN) {
1353 if (d40c->phy_chan->num % 2 == 0)
1354 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1355 else
1356 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1357
1358 status = (readl(active_reg) &
1359 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1360 D40_CHAN_POS(d40c->phy_chan->num);
1361 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1362 is_paused = true;
1363
1364 goto _exit;
1365 }
1366
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001367 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1368 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1369 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1370 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1371 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1372 else {
1373 dev_err(&d40c->chan.dev->device,
1374 "[%s] Unknown direction\n", __func__);
1375 goto _exit;
1376 }
1377 status = d40_chan_has_events(d40c);
1378 status = (status & D40_EVENTLINE_MASK(event)) >>
1379 D40_EVENTLINE_POS(event);
1380
1381 if (status != D40_DMA_RUN)
1382 is_paused = true;
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001383_exit:
1384 spin_unlock_irqrestore(&d40c->lock, flags);
1385 return is_paused;
1386
1387}
1388
1389
Linus Walleij8d318a52010-03-30 15:33:42 +02001390static bool d40_tx_is_linked(struct d40_chan *d40c)
1391{
1392 bool is_link;
1393
1394 if (d40c->log_num != D40_PHY_CHAN)
1395 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1396 else
1397 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1398 d40c->phy_chan->num * D40_DREG_PCDELTA +
1399 D40_CHAN_REG_SDLNK) &
1400 D40_SREG_LNK_PHYS_LNK_MASK;
1401 return is_link;
1402}
1403
1404static u32 d40_residue(struct d40_chan *d40c)
1405{
1406 u32 num_elt;
1407
1408 if (d40c->log_num != D40_PHY_CHAN)
Linus Walleij508849a2010-06-20 21:26:07 +00001409 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
Linus Walleij8d318a52010-03-30 15:33:42 +02001410 >> D40_MEM_LCSP2_ECNT_POS;
1411 else
1412 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1413 d40c->phy_chan->num * D40_DREG_PCDELTA +
1414 D40_CHAN_REG_SDELT) &
Linus Walleij508849a2010-06-20 21:26:07 +00001415 D40_SREG_ELEM_PHY_ECNT_MASK) >>
1416 D40_SREG_ELEM_PHY_ECNT_POS;
Linus Walleij8d318a52010-03-30 15:33:42 +02001417 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1418}
1419
1420static int d40_resume(struct dma_chan *chan)
1421{
1422 struct d40_chan *d40c =
1423 container_of(chan, struct d40_chan, chan);
1424 int res = 0;
1425 unsigned long flags;
1426
1427 spin_lock_irqsave(&d40c->lock, flags);
1428
Jonas Aaberg0c322692010-06-20 21:25:46 +00001429 /* If bytes left to transfer or linked tx resume job */
1430 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1431 if (d40c->log_num != D40_PHY_CHAN)
Linus Walleij8d318a52010-03-30 15:33:42 +02001432 d40_config_set_event(d40c, true);
Linus Walleij8d318a52010-03-30 15:33:42 +02001433 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
Jonas Aaberg0c322692010-06-20 21:25:46 +00001434 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001435
Linus Walleij8d318a52010-03-30 15:33:42 +02001436 spin_unlock_irqrestore(&d40c->lock, flags);
1437 return res;
1438}
1439
1440static u32 stedma40_residue(struct dma_chan *chan)
1441{
1442 struct d40_chan *d40c =
1443 container_of(chan, struct d40_chan, chan);
1444 u32 bytes_left;
1445 unsigned long flags;
1446
1447 spin_lock_irqsave(&d40c->lock, flags);
1448 bytes_left = d40_residue(d40c);
1449 spin_unlock_irqrestore(&d40c->lock, flags);
1450
1451 return bytes_left;
1452}
1453
1454/* Public DMA functions in addition to the DMA engine framework */
1455
1456int stedma40_set_psize(struct dma_chan *chan,
1457 int src_psize,
1458 int dst_psize)
1459{
1460 struct d40_chan *d40c =
1461 container_of(chan, struct d40_chan, chan);
1462 unsigned long flags;
1463
1464 spin_lock_irqsave(&d40c->lock, flags);
1465
1466 if (d40c->log_num != D40_PHY_CHAN) {
1467 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1468 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
Linus Walleij508849a2010-06-20 21:26:07 +00001469 d40c->log_def.lcsp1 |= src_psize <<
1470 D40_MEM_LCSP1_SCFG_PSIZE_POS;
1471 d40c->log_def.lcsp3 |= dst_psize <<
1472 D40_MEM_LCSP1_SCFG_PSIZE_POS;
Linus Walleij8d318a52010-03-30 15:33:42 +02001473 goto out;
1474 }
1475
1476 if (src_psize == STEDMA40_PSIZE_PHY_1)
1477 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1478 else {
1479 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1480 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1481 D40_SREG_CFG_PSIZE_POS);
1482 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1483 }
1484
1485 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1486 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1487 else {
1488 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1489 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1490 D40_SREG_CFG_PSIZE_POS);
1491 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1492 }
1493out:
1494 spin_unlock_irqrestore(&d40c->lock, flags);
1495 return 0;
1496}
1497EXPORT_SYMBOL(stedma40_set_psize);
1498
1499struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1500 struct scatterlist *sgl_dst,
1501 struct scatterlist *sgl_src,
1502 unsigned int sgl_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001503 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001504{
1505 int res;
1506 struct d40_desc *d40d;
1507 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1508 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001509 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001510
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001511 if (d40c->phy_chan == NULL) {
1512 dev_err(&d40c->chan.dev->device,
1513 "[%s] Unallocated channel.\n", __func__);
1514 return ERR_PTR(-EINVAL);
1515 }
1516
Jonas Aaberg2a614342010-06-20 21:25:24 +00001517 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001518 d40d = d40_desc_get(d40c);
1519
1520 if (d40d == NULL)
1521 goto err;
1522
Linus Walleij8d318a52010-03-30 15:33:42 +02001523 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001524 d40d->lli_tx_len = d40d->lli_len;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001525 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001526
1527 if (d40c->log_num != D40_PHY_CHAN) {
Per Friden941b77a2010-06-20 21:24:45 +00001528 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1529 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1530
Linus Walleij8d318a52010-03-30 15:33:42 +02001531 if (sgl_len > 1)
1532 /*
1533 * Check if there is space available in lcla. If not,
1534 * split list into 1-length and run only in lcpa
1535 * space.
1536 */
Linus Walleij508849a2010-06-20 21:26:07 +00001537 if (d40_lcla_id_get(d40c) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001538 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001539
1540 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1541 dev_err(&d40c->chan.dev->device,
1542 "[%s] Out of memory\n", __func__);
1543 goto err;
1544 }
1545
1546 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1547 sgl_src,
1548 sgl_len,
1549 d40d->lli_log.src,
1550 d40c->log_def.lcsp1,
1551 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001552 dma_flags & DMA_PREP_INTERRUPT,
Per Friden941b77a2010-06-20 21:24:45 +00001553 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001554 d40c->base->plat_data->llis_per_log);
1555
1556 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1557 sgl_dst,
1558 sgl_len,
1559 d40d->lli_log.dst,
1560 d40c->log_def.lcsp3,
1561 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001562 dma_flags & DMA_PREP_INTERRUPT,
Per Friden941b77a2010-06-20 21:24:45 +00001563 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001564 d40c->base->plat_data->llis_per_log);
1565
1566
1567 } else {
1568 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1569 dev_err(&d40c->chan.dev->device,
1570 "[%s] Out of memory\n", __func__);
1571 goto err;
1572 }
1573
1574 res = d40_phy_sg_to_lli(sgl_src,
1575 sgl_len,
1576 0,
1577 d40d->lli_phy.src,
1578 d40d->lli_phy.src_addr,
1579 d40c->src_def_cfg,
1580 d40c->dma_cfg.src_info.data_width,
1581 d40c->dma_cfg.src_info.psize,
1582 true);
1583
1584 if (res < 0)
1585 goto err;
1586
1587 res = d40_phy_sg_to_lli(sgl_dst,
1588 sgl_len,
1589 0,
1590 d40d->lli_phy.dst,
1591 d40d->lli_phy.dst_addr,
1592 d40c->dst_def_cfg,
1593 d40c->dma_cfg.dst_info.data_width,
1594 d40c->dma_cfg.dst_info.psize,
1595 true);
1596
1597 if (res < 0)
1598 goto err;
1599
1600 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1601 d40d->lli_pool.size, DMA_TO_DEVICE);
1602 }
1603
1604 dma_async_tx_descriptor_init(&d40d->txd, chan);
1605
1606 d40d->txd.tx_submit = d40_tx_submit;
1607
Jonas Aaberg2a614342010-06-20 21:25:24 +00001608 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001609
1610 return &d40d->txd;
1611err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001612 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001613 return NULL;
1614}
1615EXPORT_SYMBOL(stedma40_memcpy_sg);
1616
1617bool stedma40_filter(struct dma_chan *chan, void *data)
1618{
1619 struct stedma40_chan_cfg *info = data;
1620 struct d40_chan *d40c =
1621 container_of(chan, struct d40_chan, chan);
1622 int err;
1623
1624 if (data) {
1625 err = d40_validate_conf(d40c, info);
1626 if (!err)
1627 d40c->dma_cfg = *info;
1628 } else
1629 err = d40_config_memcpy(d40c);
1630
1631 return err == 0;
1632}
1633EXPORT_SYMBOL(stedma40_filter);
1634
1635/* DMA ENGINE functions */
1636static int d40_alloc_chan_resources(struct dma_chan *chan)
1637{
1638 int err;
1639 unsigned long flags;
1640 struct d40_chan *d40c =
1641 container_of(chan, struct d40_chan, chan);
Linus Walleijef1872e2010-06-20 21:24:52 +00001642 bool is_free_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02001643 spin_lock_irqsave(&d40c->lock, flags);
1644
1645 d40c->completed = chan->cookie = 1;
1646
1647 /*
1648 * If no dma configuration is set (channel_type == 0)
Linus Walleijef1872e2010-06-20 21:24:52 +00001649 * use default configuration (memcpy)
Linus Walleij8d318a52010-03-30 15:33:42 +02001650 */
1651 if (d40c->dma_cfg.channel_type == 0) {
1652 err = d40_config_memcpy(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001653 if (err) {
1654 dev_err(&d40c->chan.dev->device,
1655 "[%s] Failed to configure memcpy channel\n",
1656 __func__);
1657 goto fail;
1658 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001659 }
Linus Walleijef1872e2010-06-20 21:24:52 +00001660 is_free_phy = (d40c->phy_chan == NULL);
Linus Walleij8d318a52010-03-30 15:33:42 +02001661
1662 err = d40_allocate_channel(d40c);
1663 if (err) {
1664 dev_err(&d40c->chan.dev->device,
1665 "[%s] Failed to allocate channel\n", __func__);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001666 goto fail;
Linus Walleij8d318a52010-03-30 15:33:42 +02001667 }
1668
Linus Walleijef1872e2010-06-20 21:24:52 +00001669 /* Fill in basic CFG register values */
1670 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1671 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1672
1673 if (d40c->log_num != D40_PHY_CHAN) {
1674 d40_log_cfg(&d40c->dma_cfg,
1675 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1676
1677 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1678 d40c->lcpa = d40c->base->lcpa_base +
1679 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1680 else
1681 d40c->lcpa = d40c->base->lcpa_base +
1682 d40c->dma_cfg.dst_dev_type *
1683 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1684 }
1685
1686 /*
1687 * Only write channel configuration to the DMA if the physical
1688 * resource is free. In case of multiple logical channels
1689 * on the same physical resource, only the first write is necessary.
1690 */
1691 if (is_free_phy) {
1692 err = d40_config_write(d40c);
1693 if (err) {
1694 dev_err(&d40c->chan.dev->device,
1695 "[%s] Failed to configure channel\n",
1696 __func__);
1697 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001698 }
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001699fail:
Linus Walleij8d318a52010-03-30 15:33:42 +02001700 spin_unlock_irqrestore(&d40c->lock, flags);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001701 return err;
Linus Walleij8d318a52010-03-30 15:33:42 +02001702}
1703
1704static void d40_free_chan_resources(struct dma_chan *chan)
1705{
1706 struct d40_chan *d40c =
1707 container_of(chan, struct d40_chan, chan);
1708 int err;
1709 unsigned long flags;
1710
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001711 if (d40c->phy_chan == NULL) {
1712 dev_err(&d40c->chan.dev->device,
1713 "[%s] Cannot free unallocated channel\n", __func__);
1714 return;
1715 }
1716
1717
Linus Walleij8d318a52010-03-30 15:33:42 +02001718 spin_lock_irqsave(&d40c->lock, flags);
1719
1720 err = d40_free_dma(d40c);
1721
1722 if (err)
1723 dev_err(&d40c->chan.dev->device,
1724 "[%s] Failed to free channel\n", __func__);
1725 spin_unlock_irqrestore(&d40c->lock, flags);
1726}
1727
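/*
 * Prepare a single block memcpy job: one src and one dst LLI, built as
 * logical or physical LLIs depending on the channel type.
 */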
1728static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1729 dma_addr_t dst,
1730 dma_addr_t src,
1731 size_t size,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001732 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001733{
1734 struct d40_desc *d40d;
1735 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1736 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001737 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001738 int err = 0;
1739
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001740 if (d40c->phy_chan == NULL) {
1741 dev_err(&d40c->chan.dev->device,
1742 "[%s] Channel is not allocated.\n", __func__);
1743 return ERR_PTR(-EINVAL);
1744 }
1745
Jonas Aaberg2a614342010-06-20 21:25:24 +00001746 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001747 d40d = d40_desc_get(d40c);
1748
1749 if (d40d == NULL) {
1750 dev_err(&d40c->chan.dev->device,
1751 "[%s] Descriptor is NULL\n", __func__);
1752 goto err;
1753 }
1754
Jonas Aaberg2a614342010-06-20 21:25:24 +00001755 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001756
1757 dma_async_tx_descriptor_init(&d40d->txd, chan);
1758
1759 d40d->txd.tx_submit = d40_tx_submit;
1760
1761 if (d40c->log_num != D40_PHY_CHAN) {
1762
1763 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1764 dev_err(&d40c->chan.dev->device,
1765 "[%s] Out of memory\n", __func__);
1766 goto err;
1767 }
1768 d40d->lli_len = 1;
Per Friden941b77a2010-06-20 21:24:45 +00001769 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001770
1771 d40_log_fill_lli(d40d->lli_log.src,
1772 src,
1773 size,
1774 0,
1775 d40c->log_def.lcsp1,
1776 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg2123a612010-06-20 21:25:54 +00001777 false, true);
Linus Walleij8d318a52010-03-30 15:33:42 +02001778
1779 d40_log_fill_lli(d40d->lli_log.dst,
1780 dst,
1781 size,
1782 0,
1783 d40c->log_def.lcsp3,
1784 d40c->dma_cfg.dst_info.data_width,
1785 true, true);
1786
1787 } else {
1788
1789 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1790 dev_err(&d40c->chan.dev->device,
1791 "[%s] Out of memory\n", __func__);
1792 goto err;
1793 }
1794
1795 err = d40_phy_fill_lli(d40d->lli_phy.src,
1796 src,
1797 size,
1798 d40c->dma_cfg.src_info.psize,
1799 0,
1800 d40c->src_def_cfg,
1801 true,
1802 d40c->dma_cfg.src_info.data_width,
1803 false);
1804 if (err)
1805 goto err_fill_lli;
1806
1807 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1808 dst,
1809 size,
1810 d40c->dma_cfg.dst_info.psize,
1811 0,
1812 d40c->dst_def_cfg,
1813 true,
1814 d40c->dma_cfg.dst_info.data_width,
1815 false);
1816
1817 if (err)
1818 goto err_fill_lli;
1819
1820 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1821 d40d->lli_pool.size, DMA_TO_DEVICE);
1822 }
1823
Jonas Aaberg2a614342010-06-20 21:25:24 +00001824 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001825 return &d40d->txd;
1826
1827err_fill_lli:
1828 dev_err(&d40c->chan.dev->device,
1829 "[%s] Failed filling in PHY LLI\n", __func__);
1830 d40_pool_lli_free(d40d);
1831err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001832 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001833 return NULL;
1834}
1835
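/*
 * Build logical channel LLIs for a slave scatter/gather job. The number
 * of LLIs per hardware transfer is capped by llis_per_log, and if no
 * LCLA entry can be reserved the list is run one LLI at a time from
 * LCPA.
 */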
1836static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1837 struct d40_chan *d40c,
1838 struct scatterlist *sgl,
1839 unsigned int sg_len,
1840 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001841 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001842{
1843 dma_addr_t dev_addr = 0;
1844 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001845
1846 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1847 dev_err(&d40c->chan.dev->device,
1848 "[%s] Out of memory\n", __func__);
1849 return -ENOMEM;
1850 }
1851
1852 d40d->lli_len = sg_len;
Per Friden941b77a2010-06-20 21:24:45 +00001853 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1854 d40d->lli_tx_len = d40d->lli_len;
1855 else
1856 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
Linus Walleij8d318a52010-03-30 15:33:42 +02001857
1858 if (sg_len > 1)
1859 /*
1860 * Check if there is space available in lcla.
1861 * If not, split list into 1-length and run only
1862 * in lcpa space.
1863 */
Linus Walleij508849a2010-06-20 21:26:07 +00001864 if (d40_lcla_id_get(d40c) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001865 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001866
Jonas Aaberg2a614342010-06-20 21:25:24 +00001867 if (direction == DMA_FROM_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001868 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001869 else if (direction == DMA_TO_DEVICE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001870 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001871 else
Linus Walleij8d318a52010-03-30 15:33:42 +02001872 return -EINVAL;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001873
1874 total_size = d40_log_sg_to_dev(&d40c->lcla,
1875 sgl, sg_len,
1876 &d40d->lli_log,
1877 &d40c->log_def,
1878 d40c->dma_cfg.src_info.data_width,
1879 d40c->dma_cfg.dst_info.data_width,
1880 direction,
1881 dma_flags & DMA_PREP_INTERRUPT,
1882 dev_addr, d40d->lli_tx_len,
1883 d40c->base->plat_data->llis_per_log);
1884
Linus Walleij8d318a52010-03-30 15:33:42 +02001885 if (total_size < 0)
1886 return -EINVAL;
1887
1888 return 0;
1889}
1890
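/*
 * Build physical channel LLIs for a slave scatter/gather job and map
 * them so the DMA controller can fetch them.
 */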
1891static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1892 struct d40_chan *d40c,
1893 struct scatterlist *sgl,
1894 unsigned int sgl_len,
1895 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001896 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001897{
1898 dma_addr_t src_dev_addr;
1899 dma_addr_t dst_dev_addr;
1900 int res;
1901
1902 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1903 dev_err(&d40c->chan.dev->device,
1904 "[%s] Out of memory\n", __func__);
1905 return -ENOMEM;
1906 }
1907
1908 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001909 d40d->lli_tx_len = sgl_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02001910
1911 if (direction == DMA_FROM_DEVICE) {
1912 dst_dev_addr = 0;
1913 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1914 } else if (direction == DMA_TO_DEVICE) {
1915 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1916 src_dev_addr = 0;
1917 } else
1918 return -EINVAL;
1919
1920 res = d40_phy_sg_to_lli(sgl,
1921 sgl_len,
1922 src_dev_addr,
1923 d40d->lli_phy.src,
1924 d40d->lli_phy.src_addr,
1925 d40c->src_def_cfg,
1926 d40c->dma_cfg.src_info.data_width,
1927 d40c->dma_cfg.src_info.psize,
1928 true);
1929 if (res < 0)
1930 return res;
1931
1932 res = d40_phy_sg_to_lli(sgl,
1933 sgl_len,
1934 dst_dev_addr,
1935 d40d->lli_phy.dst,
1936 d40d->lli_phy.dst_addr,
1937 d40c->dst_def_cfg,
1938 d40c->dma_cfg.dst_info.data_width,
1939 d40c->dma_cfg.dst_info.psize,
1940 true);
1941 if (res < 0)
1942 return res;
1943
1944 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1945 d40d->lli_pool.size, DMA_TO_DEVICE);
1946 return 0;
1947}
1948
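/*
 * dmaengine prep_slave_sg hook: run the optional pre_transfer callback
 * and dispatch to the logical or physical LLI setup above.
 */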
1949static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1950 struct scatterlist *sgl,
1951 unsigned int sg_len,
1952 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001953 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001954{
1955 struct d40_desc *d40d;
1956 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1957 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001958 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001959 int err;
1960
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001961 if (d40c->phy_chan == NULL) {
1962 dev_err(&d40c->chan.dev->device,
1963 "[%s] Cannot prepare unallocated channel\n", __func__);
1964 return ERR_PTR(-EINVAL);
1965 }
1966
Linus Walleij8d318a52010-03-30 15:33:42 +02001967 if (d40c->dma_cfg.pre_transfer)
1968 d40c->dma_cfg.pre_transfer(chan,
1969 d40c->dma_cfg.pre_transfer_data,
1970 sg_dma_len(sgl));
1971
Jonas Aaberg2a614342010-06-20 21:25:24 +00001972 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001973 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001974 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001975
1976 if (d40d == NULL)
1977 return NULL;
1978
Linus Walleij8d318a52010-03-30 15:33:42 +02001979 if (d40c->log_num != D40_PHY_CHAN)
1980 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001981 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001982 else
1983 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001984 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001985 if (err) {
1986 dev_err(&d40c->chan.dev->device,
1987 "[%s] Failed to prepare %s slave sg job: %d\n",
1988 __func__,
1989 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
1990 return NULL;
1991 }
1992
Jonas Aaberg2a614342010-06-20 21:25:24 +00001993 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001994
1995 dma_async_tx_descriptor_init(&d40d->txd, chan);
1996
1997 d40d->txd.tx_submit = d40_tx_submit;
1998
1999 return &d40d->txd;
2000}
2001
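/*
 * dmaengine tx_status hook: report DMA_PAUSED when the channel is
 * paused and fill in the residue of the ongoing transfer.
 */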
2002static enum dma_status d40_tx_status(struct dma_chan *chan,
2003 dma_cookie_t cookie,
2004 struct dma_tx_state *txstate)
2005{
2006 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2007 dma_cookie_t last_used;
2008 dma_cookie_t last_complete;
2009 int ret;
2010
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002011 if (d40c->phy_chan == NULL) {
2012 dev_err(&d40c->chan.dev->device,
2013 "[%s] Cannot read status of unallocated channel\n",
2014 __func__);
2015 return -EINVAL;
2016 }
2017
Linus Walleij8d318a52010-03-30 15:33:42 +02002018 last_complete = d40c->completed;
2019 last_used = chan->cookie;
2020
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002021 if (d40_is_paused(d40c))
2022 ret = DMA_PAUSED;
2023 else
2024 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002025
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002026 dma_set_tx_state(txstate, last_complete, last_used,
2027 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002028
2029 return ret;
2030}
2031
2032static void d40_issue_pending(struct dma_chan *chan)
2033{
2034 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2035 unsigned long flags;
2036
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002037 if (d40c->phy_chan == NULL) {
2038 dev_err(&d40c->chan.dev->device,
2039 "[%s] Channel is not allocated!\n", __func__);
2040 return;
2041 }
2042
Linus Walleij8d318a52010-03-30 15:33:42 +02002043 spin_lock_irqsave(&d40c->lock, flags);
2044
2045 /* Busy means that pending jobs are already being processed */
2046 if (!d40c->busy)
2047 (void) d40_queue_start(d40c);
2048
2049 spin_unlock_irqrestore(&d40c->lock, flags);
2050}
2051
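/*
 * dmaengine control hook: implements DMA_TERMINATE_ALL, DMA_PAUSE and
 * DMA_RESUME; all other commands return -ENXIO.
 */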
Linus Walleij05827632010-05-17 16:30:42 -07002052static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2053 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002054{
2055 unsigned long flags;
2056 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2057
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002058 if (d40c->phy_chan == NULL) {
2059 dev_err(&d40c->chan.dev->device,
2060 "[%s] Channel is not allocated!\n", __func__);
2061 return -EINVAL;
2062 }
2063
Linus Walleij8d318a52010-03-30 15:33:42 +02002064 switch (cmd) {
2065 case DMA_TERMINATE_ALL:
2066 spin_lock_irqsave(&d40c->lock, flags);
2067 d40_term_all(d40c);
2068 spin_unlock_irqrestore(&d40c->lock, flags);
2069 return 0;
2070 case DMA_PAUSE:
2071 return d40_pause(chan);
2072 case DMA_RESUME:
2073 return d40_resume(chan);
2074 }
2075
2076 /* Other commands are unimplemented */
2077 return -ENXIO;
2078}
2079
2080/* Initialization functions */
2081
2082static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2083 struct d40_chan *chans, int offset,
2084 int num_chans)
2085{
2086 int i = 0;
2087 struct d40_chan *d40c;
2088
2089 INIT_LIST_HEAD(&dma->channels);
2090
2091 for (i = offset; i < offset + num_chans; i++) {
2092 d40c = &chans[i];
2093 d40c->base = base;
2094 d40c->chan.device = dma;
2095
2096 /* Invalidate lcla element */
2097 d40c->lcla.src_id = -1;
2098 d40c->lcla.dst_id = -1;
2099
2100 spin_lock_init(&d40c->lock);
2101
2102 d40c->log_num = D40_PHY_CHAN;
2103
Linus Walleij8d318a52010-03-30 15:33:42 +02002104 INIT_LIST_HEAD(&d40c->active);
2105 INIT_LIST_HEAD(&d40c->queue);
2106 INIT_LIST_HEAD(&d40c->client);
2107
Linus Walleij8d318a52010-03-30 15:33:42 +02002108 tasklet_init(&d40c->tasklet, dma_tasklet,
2109 (unsigned long) d40c);
2110
2111 list_add_tail(&d40c->chan.device_node,
2112 &dma->channels);
2113 }
2114}
2115
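/*
 * Register three dma_device instances: slave transfers on logical
 * channels, memcpy on logical channels, and physical channels capable
 * of both.
 */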
2116static int __init d40_dmaengine_init(struct d40_base *base,
2117 int num_reserved_chans)
2118{
2119	int err;
2120
2121 d40_chan_init(base, &base->dma_slave, base->log_chans,
2122 0, base->num_log_chans);
2123
2124 dma_cap_zero(base->dma_slave.cap_mask);
2125 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2126
2127 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2128 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2129 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2130 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2131 base->dma_slave.device_tx_status = d40_tx_status;
2132 base->dma_slave.device_issue_pending = d40_issue_pending;
2133 base->dma_slave.device_control = d40_control;
2134 base->dma_slave.dev = base->dev;
2135
2136 err = dma_async_device_register(&base->dma_slave);
2137
2138 if (err) {
2139 dev_err(base->dev,
2140 "[%s] Failed to register slave channels\n",
2141 __func__);
2142 goto failure1;
2143 }
2144
2145 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2146 base->num_log_chans, base->plat_data->memcpy_len);
2147
2148 dma_cap_zero(base->dma_memcpy.cap_mask);
2149 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2150
2151 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2152 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2153 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2154 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2155 base->dma_memcpy.device_tx_status = d40_tx_status;
2156 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2157 base->dma_memcpy.device_control = d40_control;
2158 base->dma_memcpy.dev = base->dev;
2159 /*
2160	 * This controller can only access addresses that are aligned to
2161	 * 32 bit (2^2 byte) boundaries.
2162 */
2163 base->dma_memcpy.copy_align = 2;
2164
2165 err = dma_async_device_register(&base->dma_memcpy);
2166
2167 if (err) {
2168 dev_err(base->dev,
2169			"[%s] Failed to register memcpy only channels\n",
2170 __func__);
2171 goto failure2;
2172 }
2173
2174 d40_chan_init(base, &base->dma_both, base->phy_chans,
2175 0, num_reserved_chans);
2176
2177 dma_cap_zero(base->dma_both.cap_mask);
2178 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2179 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2180
2181 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2182 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2183 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2184 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2185 base->dma_both.device_tx_status = d40_tx_status;
2186 base->dma_both.device_issue_pending = d40_issue_pending;
2187 base->dma_both.device_control = d40_control;
2188 base->dma_both.dev = base->dev;
2189 base->dma_both.copy_align = 2;
2190 err = dma_async_device_register(&base->dma_both);
2191
2192 if (err) {
2193 dev_err(base->dev,
2194 "[%s] Failed to register logical and physical capable channels\n",
2195 __func__);
2196 goto failure3;
2197 }
2198 return 0;
2199failure3:
2200 dma_async_device_unregister(&base->dma_memcpy);
2201failure2:
2202 dma_async_device_unregister(&base->dma_slave);
2203failure1:
2204 return err;
2205}
2206
2207/* Initialization functions. */
2208
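/*
 * Mark physical channels that are secure-only or disabled by the
 * platform data as occupied and return the number of channels
 * available for use.
 */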
2209static int __init d40_phy_res_init(struct d40_base *base)
2210{
2211 int i;
2212 int num_phy_chans_avail = 0;
2213 u32 val[2];
2214 int odd_even_bit = -2;
2215
2216 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2217 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2218
2219 for (i = 0; i < base->num_phy_chans; i++) {
2220 base->phy_res[i].num = i;
2221 odd_even_bit += 2 * ((i % 2) == 0);
2222 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2223 /* Mark security only channels as occupied */
2224 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2225 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2226 } else {
2227 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2228 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2229 num_phy_chans_avail++;
2230 }
2231 spin_lock_init(&base->phy_res[i].lock);
2232 }
Jonas Aaberg6b7acd82010-06-20 21:26:59 +00002233
2234 /* Mark disabled channels as occupied */
2235 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2236 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2237 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2238 num_phy_chans_avail--;
2239 }
2240
Linus Walleij8d318a52010-03-30 15:33:42 +02002241 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2242 num_phy_chans_avail, base->num_phy_chans);
2243
2244 /* Verify settings extended vs standard */
2245 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2246
2247 for (i = 0; i < base->num_phy_chans; i++) {
2248
2249 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2250 (val[0] & 0x3) != 1)
2251 dev_info(base->dev,
2252 "[%s] INFO: channel %d is misconfigured (%d)\n",
2253 __func__, i, val[0] & 0x3);
2254
2255 val[0] = val[0] >> 2;
2256 }
2257
2258 return num_phy_chans_avail;
2259}
2260
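/*
 * Verify the peripheral and PrimeCell ID registers, read out the number
 * of physical channels and allocate the d40_base structure with its
 * per-channel bookkeeping.
 */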
2261static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2262{
2263 static const struct d40_reg_val dma_id_regs[] = {
2264 /* Peripheral Id */
2265 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2266 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2267 /*
2268 * D40_DREG_PERIPHID2 Depends on HW revision:
2269 * MOP500/HREF ED has 0x0008,
2270 * ? has 0x0018,
2271 * HREF V1 has 0x0028
2272 */
2273 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2274
2275 /* PCell Id */
2276 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2277 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2278 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2279 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2280 };
2281 struct stedma40_platform_data *plat_data;
2282 struct clk *clk = NULL;
2283 void __iomem *virtbase = NULL;
2284 struct resource *res = NULL;
2285 struct d40_base *base = NULL;
2286 int num_log_chans = 0;
2287 int num_phy_chans;
2288 int i;
2289
2290 clk = clk_get(&pdev->dev, NULL);
2291
2292 if (IS_ERR(clk)) {
2293 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2294 __func__);
2295 goto failure;
2296 }
2297
2298 clk_enable(clk);
2299
2300 /* Get IO for DMAC base address */
2301 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2302 if (!res)
2303 goto failure;
2304
2305 if (request_mem_region(res->start, resource_size(res),
2306 D40_NAME " I/O base") == NULL)
2307 goto failure;
2308
2309 virtbase = ioremap(res->start, resource_size(res));
2310 if (!virtbase)
2311 goto failure;
2312
2313 /* HW version check */
2314 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2315 if (dma_id_regs[i].val !=
2316 readl(virtbase + dma_id_regs[i].reg)) {
2317 dev_err(&pdev->dev,
2318 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2319 __func__,
2320 dma_id_regs[i].val,
2321 dma_id_regs[i].reg,
2322 readl(virtbase + dma_id_regs[i].reg));
2323 goto failure;
2324 }
2325 }
2326
2327 i = readl(virtbase + D40_DREG_PERIPHID2);
2328
2329 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2330 dev_err(&pdev->dev,
2331 "[%s] Unknown designer! Got %x wanted %x\n",
2332 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2333 goto failure;
2334 }
2335
2336 /* The number of physical channels on this HW */
2337 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2338
2339 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2340 (i >> 4) & 0xf, res->start);
2341
2342 plat_data = pdev->dev.platform_data;
2343
2344 /* Count the number of logical channels in use */
2345 for (i = 0; i < plat_data->dev_len; i++)
2346 if (plat_data->dev_rx[i] != 0)
2347 num_log_chans++;
2348
2349 for (i = 0; i < plat_data->dev_len; i++)
2350 if (plat_data->dev_tx[i] != 0)
2351 num_log_chans++;
2352
2353 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2354 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2355 sizeof(struct d40_chan), GFP_KERNEL);
2356
2357 if (base == NULL) {
2358 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2359 goto failure;
2360 }
2361
2362 base->clk = clk;
2363 base->num_phy_chans = num_phy_chans;
2364 base->num_log_chans = num_log_chans;
2365 base->phy_start = res->start;
2366 base->phy_size = resource_size(res);
2367 base->virtbase = virtbase;
2368 base->plat_data = plat_data;
2369 base->dev = &pdev->dev;
2370 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2371 base->log_chans = &base->phy_chans[num_phy_chans];
2372
2373 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2374 GFP_KERNEL);
2375 if (!base->phy_res)
2376 goto failure;
2377
2378 base->lookup_phy_chans = kzalloc(num_phy_chans *
2379 sizeof(struct d40_chan *),
2380 GFP_KERNEL);
2381 if (!base->lookup_phy_chans)
2382 goto failure;
2383
2384 if (num_log_chans + plat_data->memcpy_len) {
2385 /*
2386		 * The max number of logical channels is the number of event
2387		 * lines for all src devices and dst devices.
2388 */
2389 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2390 sizeof(struct d40_chan *),
2391 GFP_KERNEL);
2392 if (!base->lookup_log_chans)
2393 goto failure;
2394 }
2395 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2396 GFP_KERNEL);
2397 if (!base->lcla_pool.alloc_map)
2398 goto failure;
2399
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002400 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2401 0, SLAB_HWCACHE_ALIGN,
2402 NULL);
2403 if (base->desc_slab == NULL)
2404 goto failure;
2405
Linus Walleij8d318a52010-03-30 15:33:42 +02002406 return base;
2407
2408failure:
2409 if (clk) {
2410 clk_disable(clk);
2411 clk_put(clk);
2412 }
2413 if (virtbase)
2414 iounmap(virtbase);
2415 if (res)
2416 release_mem_region(res->start,
2417 resource_size(res));
2420
2421 if (base) {
2422 kfree(base->lcla_pool.alloc_map);
2423 kfree(base->lookup_log_chans);
2424 kfree(base->lookup_phy_chans);
2425 kfree(base->phy_res);
2426 kfree(base);
2427 }
2428
2429 return NULL;
2430}
2431
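/*
 * Write the initial register setup: clock the whole block, unmask and
 * clear interrupts for all logical channels and put the usable physical
 * channels into physical mode with their interrupts enabled.
 */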
2432static void __init d40_hw_init(struct d40_base *base)
2433{
2434
2435 static const struct d40_reg_val dma_init_reg[] = {
2436 /* Clock every part of the DMA block from start */
2437 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2438
2439 /* Interrupts on all logical channels */
2440 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2441 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2442 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2443 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2444 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2445 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2446 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2447 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2448 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2449 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2450 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2451 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2452 };
2453 int i;
2454 u32 prmseo[2] = {0, 0};
2455 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2456 u32 pcmis = 0;
2457 u32 pcicr = 0;
2458
2459 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2460 writel(dma_init_reg[i].val,
2461 base->virtbase + dma_init_reg[i].reg);
2462
2463 /* Configure all our dma channels to default settings */
2464 for (i = 0; i < base->num_phy_chans; i++) {
2465
2466 activeo[i % 2] = activeo[i % 2] << 2;
2467
2468 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2469 == D40_ALLOC_PHY) {
2470 activeo[i % 2] |= 3;
2471 continue;
2472 }
2473
2474 /* Enable interrupt # */
2475 pcmis = (pcmis << 1) | 1;
2476
2477 /* Clear interrupt # */
2478 pcicr = (pcicr << 1) | 1;
2479
2480 /* Set channel to physical mode */
2481 prmseo[i % 2] = prmseo[i % 2] << 2;
2482 prmseo[i % 2] |= 1;
2483
2484 }
2485
2486 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2487 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2488 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2489 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2490
2491 /* Write which interrupt to enable */
2492 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2493
2494 /* Write which interrupt to clear */
2495 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2496
2497}
2498
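/*
 * Allocate the LCLA area. The hardware requires LCLA_ALIGNMENT (256 kb)
 * alignment, so pages are allocated repeatedly until an aligned block
 * turns up; if that fails, a buffer of size plus alignment is allocated
 * and aligned manually.
 */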
Linus Walleij508849a2010-06-20 21:26:07 +00002499static int __init d40_lcla_allocate(struct d40_base *base)
2500{
2501 unsigned long *page_list;
2502 int i, j;
2503 int ret = 0;
2504
2505 /*
2506	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
2507	 * To fulfill this hardware requirement without wasting 256 kb,
2508	 * we allocate pages until we get an aligned one.
2509 */
2510 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2511 GFP_KERNEL);
2512
2513 if (!page_list) {
2514 ret = -ENOMEM;
2515 goto failure;
2516 }
2517
2518 /* Calculating how many pages that are required */
2519 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2520
2521 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2522 page_list[i] = __get_free_pages(GFP_KERNEL,
2523 base->lcla_pool.pages);
2524 if (!page_list[i]) {
2525
2526 dev_err(base->dev,
2527 "[%s] Failed to allocate %d pages.\n",
2528 __func__, base->lcla_pool.pages);
2529
2530 for (j = 0; j < i; j++)
2531 free_pages(page_list[j], base->lcla_pool.pages);
2532 goto failure;
2533 }
2534
2535 if ((virt_to_phys((void *)page_list[i]) &
2536 (LCLA_ALIGNMENT - 1)) == 0)
2537 break;
2538 }
2539
2540 for (j = 0; j < i; j++)
2541 free_pages(page_list[j], base->lcla_pool.pages);
2542
2543 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2544 base->lcla_pool.base = (void *)page_list[i];
2545 } else {
2546		/* After many attempts, no success finding a correctly
2547		 * aligned area, so fall back to allocating a big buffer */
2548 dev_warn(base->dev,
2549 "[%s] Failed to get %d pages @ 18 bit align.\n",
2550 __func__, base->lcla_pool.pages);
2551 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2552 base->num_phy_chans +
2553 LCLA_ALIGNMENT,
2554 GFP_KERNEL);
2555 if (!base->lcla_pool.base_unaligned) {
2556 ret = -ENOMEM;
2557 goto failure;
2558 }
2559
2560 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2561 LCLA_ALIGNMENT);
2562 }
2563
2564 writel(virt_to_phys(base->lcla_pool.base),
2565 base->virtbase + D40_DREG_LCLA);
2566failure:
2567 kfree(page_list);
2568 return ret;
2569}
2570
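/*
 * Platform probe: detect and map the hardware, set up the LCPA and LCLA
 * areas, request the interrupt and register the dmaengine devices.
 */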
Linus Walleij8d318a52010-03-30 15:33:42 +02002571static int __init d40_probe(struct platform_device *pdev)
2572{
2573 int err;
2574 int ret = -ENOENT;
2575 struct d40_base *base;
2576 struct resource *res = NULL;
2577 int num_reserved_chans;
2578 u32 val;
2579
2580 base = d40_hw_detect_init(pdev);
2581
2582 if (!base)
2583 goto failure;
2584
2585 num_reserved_chans = d40_phy_res_init(base);
2586
2587 platform_set_drvdata(pdev, base);
2588
2589 spin_lock_init(&base->interrupt_lock);
2590 spin_lock_init(&base->execmd_lock);
2591
2592 /* Get IO for logical channel parameter address */
2593 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2594 if (!res) {
2595 ret = -ENOENT;
2596 dev_err(&pdev->dev,
2597 "[%s] No \"lcpa\" memory resource\n",
2598 __func__);
2599 goto failure;
2600 }
2601 base->lcpa_size = resource_size(res);
2602 base->phy_lcpa = res->start;
2603
2604 if (request_mem_region(res->start, resource_size(res),
2605 D40_NAME " I/O lcpa") == NULL) {
2606 ret = -EBUSY;
2607 dev_err(&pdev->dev,
2608 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2609 __func__, res->start, res->end);
2610 goto failure;
2611 }
2612
2613 /* We make use of ESRAM memory for this. */
2614 val = readl(base->virtbase + D40_DREG_LCPA);
2615 if (res->start != val && val != 0) {
2616 dev_warn(&pdev->dev,
2617 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2618 __func__, val, res->start);
2619 } else
2620 writel(res->start, base->virtbase + D40_DREG_LCPA);
2621
2622 base->lcpa_base = ioremap(res->start, resource_size(res));
2623 if (!base->lcpa_base) {
2624 ret = -ENOMEM;
2625 dev_err(&pdev->dev,
2626 "[%s] Failed to ioremap LCPA region\n",
2627 __func__);
2628 goto failure;
2629 }
Linus Walleij508849a2010-06-20 21:26:07 +00002630
2631 ret = d40_lcla_allocate(base);
2632 if (ret) {
2633 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
Linus Walleij8d318a52010-03-30 15:33:42 +02002634 __func__);
2635 goto failure;
2636 }
2637
Linus Walleij8d318a52010-03-30 15:33:42 +02002638 spin_lock_init(&base->lcla_pool.lock);
2639
2640 base->lcla_pool.num_blocks = base->num_phy_chans;
2641
2642 base->irq = platform_get_irq(pdev, 0);
2643
2644 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2645
2646 if (ret) {
2647		dev_err(&pdev->dev, "[%s] Failed to request IRQ\n", __func__);
2648 goto failure;
2649 }
2650
2651 err = d40_dmaengine_init(base, num_reserved_chans);
2652 if (err)
2653 goto failure;
2654
2655 d40_hw_init(base);
2656
2657 dev_info(base->dev, "initialized\n");
2658 return 0;
2659
2660failure:
2661 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002662 if (base->desc_slab)
2663 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002664 if (base->virtbase)
2665 iounmap(base->virtbase);
Linus Walleij508849a2010-06-20 21:26:07 +00002666 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2667 free_pages((unsigned long)base->lcla_pool.base,
2668 base->lcla_pool.pages);
2669 if (base->lcla_pool.base_unaligned)
2670 kfree(base->lcla_pool.base_unaligned);
Linus Walleij8d318a52010-03-30 15:33:42 +02002671 if (base->phy_lcpa)
2672 release_mem_region(base->phy_lcpa,
2673 base->lcpa_size);
2674 if (base->phy_start)
2675 release_mem_region(base->phy_start,
2676 base->phy_size);
2677 if (base->clk) {
2678 clk_disable(base->clk);
2679 clk_put(base->clk);
2680 }
2681
2682 kfree(base->lcla_pool.alloc_map);
2683 kfree(base->lookup_log_chans);
2684 kfree(base->lookup_phy_chans);
2685 kfree(base->phy_res);
2686 kfree(base);
2687 }
2688
2689 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2690 return ret;
2691}
2692
2693static struct platform_driver d40_driver = {
2694 .driver = {
2695 .owner = THIS_MODULE,
2696 .name = D40_NAME,
2697 },
2698};
2699
2700int __init stedma40_init(void)
2701{
2702 return platform_driver_probe(&d40_driver, d40_probe);
2703}
2704arch_initcall(stedma40_init);