/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan) (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of LLIs in the current descriptor.
 * @lli_current: Number of transferred LLIs.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @is_hw_linked: true if this job will automatically be continued after
 * the previous one.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_current;
	int lcla_alloc;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool is_hw_linked;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void *base;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	/* ID of the most recent completed transfer */
	int completed;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head active;
	struct list_head queue;
	struct stedma40_chan_cfg dma_cfg;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_data_direction runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_phy_chans;
	int num_log_chans;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

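/*
 * Set up the LLI pool for one descriptor: a single src/dst link pair fits
 * in the pre-allocated area inside the descriptor, anything bigger gets a
 * kmalloc'ed area sized and aligned for lli_len links per direction.
 */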
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

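/*
 * LCLA entries are handed out per physical channel: each channel owns
 * D40_LCLA_LINK_PER_EVENT_GRP consecutive slots in alloc_map, and a slot
 * is claimed by storing the owning descriptor in it.
 */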
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time, therefore the half
	 * starts at 1 since 0 can't be used (zero is used as end marker).
	 */
	for (i = 1; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (d40c->log_num == D40_PHY_CHAN)
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
			D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
				D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;

}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				break;
			}
	} else {
		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
		if (d != NULL) {
			memset(d, 0, sizeof(struct d40_desc));
			INIT_LIST_HEAD(&d->node);
		}
	}
	return d;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{

	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

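/*
 * Load a descriptor onto the hardware. A physical channel gets its whole
 * LLI chain written to the channel registers in one go; a logical channel
 * gets the first link written to LCPA and the remaining links chained
 * through LCLA entries, falling back to one link at a time when no LCLA
 * entry is free.
 */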
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	int curr_lcla = -EINVAL, next_lcla;

	if (d40c->log_num == D40_PHY_CHAN) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
		d40d->lli_current = d40d->lli_len;
	} else {

		if ((d40d->lli_len - d40d->lli_current) > 1)
			curr_lcla = d40_lcla_alloc_one(d40c, d40d);

		d40_log_lli_lcpa_write(d40c->lcpa,
				       &d40d->lli_log.dst[d40d->lli_current],
				       &d40d->lli_log.src[d40d->lli_current],
				       curr_lcla);

		d40d->lli_current++;
		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
			struct d40_log_lli *lcla;

			if (d40d->lli_current + 1 < d40d->lli_len)
				next_lcla = d40_lcla_alloc_one(d40c, d40d);
			else
				next_lcla = -EINVAL;

			lcla = d40c->base->lcla_pool.base +
				d40c->phy_chan->num * 1024 +
				8 * curr_lcla * 2;

			d40_log_lli_lcla_write(lcla,
					       &d40d->lli_log.dst[d40d->lli_current],
					       &d40d->lli_log.src[d40d->lli_current],
					       next_lcla);

			(void) dma_map_single(d40c->base->dev, lcla,
					      2 * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);

			curr_lcla = next_lcla;

			if (curr_lcla == -EINVAL) {
				d40d->lli_current++;
				break;
			}

		}
	}
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;
	list_for_each_entry(d, &d40c->queue, node)
		if (list_is_last(&d->node, &d40c->queue))
			break;
	return d;
}

/* Support functions for logical channels */


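/*
 * Write a channel command into the two-bit field for this physical channel
 * in the ACTIVE/ACTIVO register. For suspend requests the channel is polled
 * until it reports STOPPED or SUSPENDED, or D40_SUSPEND_MAX_IT is reached.
 */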
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}


	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Note that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
		     d40c->phy_chan->num * D40_DREG_PCDELTA +
		     D40_CHAN_REG_SDLNK);
	return val;
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDELT);

		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSELT);

	}
}

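/*
 * Remaining bytes on the channel: the hardware reports the number of
 * elements still to be transferred, which is scaled by the element size
 * implied by the configured destination data width.
 */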
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (d40c->log_num != D40_PHY_CHAN) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
{
	/* TODO: Write */
}

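/*
 * Try to link a newly submitted physical-channel job in hardware behind the
 * job that is currently queued or active: either the channel link registers
 * or the last LLI of the previous job are pointed at the new job, so it can
 * start without software intervention.
 */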
static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
{
	struct d40_desc *d40d_prev = NULL;
	int i;
	u32 val;

	if (!list_empty(&d40c->queue))
		d40d_prev = d40_last_queued(d40c);
	else if (!list_empty(&d40c->active))
		d40d_prev = d40_first_active_get(d40c);

	if (!d40d_prev)
		return;

	/* Here we try to join this job with previous jobs */
	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	/* Figure out which link we're currently transmitting */
	for (i = 0; i < d40d_prev->lli_len; i++)
		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
			break;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;

	if (i == (d40d_prev->lli_len - 1) && val > 0) {
		/* Change the current one */
		writel(virt_to_phys(d40d->lli_phy.src),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
		writel(virt_to_phys(d40d->lli_phy.dst),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);

		d40d->is_hw_linked = true;

	} else if (i < d40d_prev->lli_len) {
		(void) dma_unmap_single(d40c->base->dev,
					virt_to_phys(d40d_prev->lli_phy.src),
					d40d_prev->lli_pool.size,
					DMA_TO_DEVICE);

		/* Keep the settings */
		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.src);

		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.dst);

		(void) dma_map_single(d40c->base->dev,
				      d40d_prev->lli_phy.src,
				      d40d_prev->lli_pool.size,
				      DMA_TO_DEVICE);
		d40d->is_hw_linked = true;
	}
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	(void) d40_pause(&d40c->chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	if (d40c->log_num == D40_PHY_CHAN)
		d40_tx_submit_phy(d40c, d40d);
	else
		d40_tx_submit_log(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	(void) d40_resume(&d40c->chan);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (d40c->log_num != D40_PHY_CHAN) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/*
		 * If this job is already linked in hw,
		 * do not submit it.
		 */

		if (!d40d->is_hw_linked) {
			/* Initiate DMA job */
			d40_desc_load(d40c, d40d);

			/* Start dma job */
			err = d40_start(d40c);

			if (err)
				return NULL;
		}
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	d40_lcla_free_all(d40c, d40d);

	if (d40d->lli_current < d40d->lli_len) {
		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		goto err;

	d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (async_tx_test_ack(&d40d->txd)) {
		d40_pool_lli_free(d40d);
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	} else {
		if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

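/*
 * The interrupt handler treats all logical and physical status registers as
 * one long bitmap; every set bit is mapped back to its channel through the
 * lookup table, acknowledged, and then either handled as a transfer-complete
 * event or reported as an error.
 */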
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
		{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
		{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (!conf->dir) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid TX channel address (%d)\n",
			__func__, conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid RX channel address (%d)\n",
			__func__, conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

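/*
 * allocated_src/allocated_dst act as a small allocation state per physical
 * channel: D40_ALLOC_FREE, D40_ALLOC_PHY for exclusive physical use, or a
 * bitmask of event lines when the channel is shared by logical channels.
 */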
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

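/*
 * Pick a physical channel for this client. A physical memcpy channel can
 * use any free physical channel; event-line transfers are restricted to the
 * pair of physical channels tied to their event group (the pairs repeat
 * every eight channels), and logical channels are spread across those
 * candidates.
 */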
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type &
		       STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;


	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;

}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}


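/*
 * Release a channel: terminate all jobs, drop client-owned descriptors,
 * deactivate the event line for logical channels and give the physical
 * channel back once no logical users remain.
 */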
static int d40_free_dma(struct d40_chan *d40c)
{

	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;


	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			       d40c->phy_chan->num * D40_DREG_PCDELTA +
			       D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			       d40c->phy_chan->num * D40_DREG_PCDELTA +
			       D40_CHAN_REG_SSLNK);
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}


static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);

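/*
 * Prepare a scatter/gather copy job: build logical or physical LLIs for
 * both the source and destination lists and return a tx descriptor to be
 * submitted through the dmaengine framework.
 */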
1630struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1631 struct scatterlist *sgl_dst,
1632 struct scatterlist *sgl_src,
1633 unsigned int sgl_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001634 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001635{
1636 int res;
1637 struct d40_desc *d40d;
1638 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1639 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001640 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001641
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001642 if (d40c->phy_chan == NULL) {
1643 dev_err(&d40c->chan.dev->device,
1644 "[%s] Unallocated channel.\n", __func__);
1645 return ERR_PTR(-EINVAL);
1646 }
1647
Jonas Aaberg2a614342010-06-20 21:25:24 +00001648 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001649 d40d = d40_desc_get(d40c);
1650
1651 if (d40d == NULL)
1652 goto err;
1653
Linus Walleij8d318a52010-03-30 15:33:42 +02001654 d40d->lli_len = sgl_len;
Jonas Aaberg698e4732010-08-09 12:08:56 +00001655 d40d->lli_current = 0;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001656 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001657
1658 if (d40c->log_num != D40_PHY_CHAN) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001659
1660 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1661 dev_err(&d40c->chan.dev->device,
1662 "[%s] Out of memory\n", __func__);
1663 goto err;
1664 }
1665
Jonas Aaberg698e4732010-08-09 12:08:56 +00001666 (void) d40_log_sg_to_lli(sgl_src,
Linus Walleij8d318a52010-03-30 15:33:42 +02001667 sgl_len,
1668 d40d->lli_log.src,
1669 d40c->log_def.lcsp1,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001670 d40c->dma_cfg.src_info.data_width);
Linus Walleij8d318a52010-03-30 15:33:42 +02001671
Jonas Aaberg698e4732010-08-09 12:08:56 +00001672 (void) d40_log_sg_to_lli(sgl_dst,
Linus Walleij8d318a52010-03-30 15:33:42 +02001673 sgl_len,
1674 d40d->lli_log.dst,
1675 d40c->log_def.lcsp3,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001676 d40c->dma_cfg.dst_info.data_width);
Linus Walleij8d318a52010-03-30 15:33:42 +02001677 } else {
1678 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1679 dev_err(&d40c->chan.dev->device,
1680 "[%s] Out of memory\n", __func__);
1681 goto err;
1682 }
1683
1684 res = d40_phy_sg_to_lli(sgl_src,
1685 sgl_len,
1686 0,
1687 d40d->lli_phy.src,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001688 virt_to_phys(d40d->lli_phy.src),
Linus Walleij8d318a52010-03-30 15:33:42 +02001689 d40c->src_def_cfg,
1690 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00001691 d40c->dma_cfg.src_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02001692
1693 if (res < 0)
1694 goto err;
1695
1696 res = d40_phy_sg_to_lli(sgl_dst,
1697 sgl_len,
1698 0,
1699 d40d->lli_phy.dst,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001700 virt_to_phys(d40d->lli_phy.dst),
Linus Walleij8d318a52010-03-30 15:33:42 +02001701 d40c->dst_def_cfg,
1702 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00001703 d40c->dma_cfg.dst_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02001704
1705 if (res < 0)
1706 goto err;
1707
1708 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1709 d40d->lli_pool.size, DMA_TO_DEVICE);
1710 }
1711
1712 dma_async_tx_descriptor_init(&d40d->txd, chan);
1713
1714 d40d->txd.tx_submit = d40_tx_submit;
1715
Jonas Aaberg2a614342010-06-20 21:25:24 +00001716 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001717
1718 return &d40d->txd;
1719err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001720 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001721 return NULL;
1722}
1723EXPORT_SYMBOL(stedma40_memcpy_sg);
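
/*
 * Example (illustrative sketch, not part of the driver): scatter-gather
 * memcpy as seen from a client.  "chan", "dst_sg", "src_sg" and "nents" are
 * assumptions for the example; both lists must hold the same number of
 * entries, matching the single sgl_len argument above.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = stedma40_memcpy_sg(chan, dst_sg, src_sg, nents,
 *				 DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -EBUSY;
 *	cookie = txd->tx_submit(txd);
 *	dma_async_issue_pending(chan);
 */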
1724
1725bool stedma40_filter(struct dma_chan *chan, void *data)
1726{
1727 struct stedma40_chan_cfg *info = data;
1728 struct d40_chan *d40c =
1729 container_of(chan, struct d40_chan, chan);
1730 int err;
1731
1732 if (data) {
1733 err = d40_validate_conf(d40c, info);
1734 if (!err)
1735 d40c->dma_cfg = *info;
1736 } else
1737 err = d40_config_memcpy(d40c);
1738
1739 return err == 0;
1740}
1741EXPORT_SYMBOL(stedma40_filter);
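
/*
 * Example (illustrative sketch, not part of the driver): how a client is
 * expected to combine stedma40_filter() with dma_request_channel().  The
 * channel configuration "cfg" is an assumption; passing NULL as the filter
 * parameter falls back to the memcpy configuration, as seen above.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *	if (!chan)
 *		return -ENODEV;
 */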
1742
1743/* DMA ENGINE functions */
1744static int d40_alloc_chan_resources(struct dma_chan *chan)
1745{
1746 int err;
1747 unsigned long flags;
1748 struct d40_chan *d40c =
1749 container_of(chan, struct d40_chan, chan);
Linus Walleijef1872e2010-06-20 21:24:52 +00001750 bool is_free_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02001751 spin_lock_irqsave(&d40c->lock, flags);
1752
1753 d40c->completed = chan->cookie = 1;
1754
1755 /*
1756 * If no dma configuration is set (channel_type == 0)
Linus Walleijef1872e2010-06-20 21:24:52 +00001757 * use default configuration (memcpy)
Linus Walleij8d318a52010-03-30 15:33:42 +02001758 */
1759 if (d40c->dma_cfg.channel_type == 0) {
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001760
Linus Walleij8d318a52010-03-30 15:33:42 +02001761 err = d40_config_memcpy(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001762 if (err) {
1763 dev_err(&d40c->chan.dev->device,
1764 "[%s] Failed to configure memcpy channel\n",
1765 __func__);
1766 goto fail;
1767 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001768 }
Linus Walleijef1872e2010-06-20 21:24:52 +00001769 is_free_phy = (d40c->phy_chan == NULL);
Linus Walleij8d318a52010-03-30 15:33:42 +02001770
1771 err = d40_allocate_channel(d40c);
1772 if (err) {
1773 dev_err(&d40c->chan.dev->device,
1774 "[%s] Failed to allocate channel\n", __func__);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001775 goto fail;
Linus Walleij8d318a52010-03-30 15:33:42 +02001776 }
1777
Linus Walleijef1872e2010-06-20 21:24:52 +00001778 /* Fill in basic CFG register values */
1779 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1780 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1781
1782 if (d40c->log_num != D40_PHY_CHAN) {
1783 d40_log_cfg(&d40c->dma_cfg,
1784 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1785
1786 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1787 d40c->lcpa = d40c->base->lcpa_base +
1788 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1789 else
1790 d40c->lcpa = d40c->base->lcpa_base +
1791 d40c->dma_cfg.dst_dev_type *
1792 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1793 }
1794
1795 /*
1796 * Only write channel configuration to the DMA if the physical
1797 * resource is free. In case of multiple logical channels
1798 * on the same physical resource, only the first write is necessary.
1799 */
Jonas Aabergb55912c2010-08-09 12:08:02 +00001800 if (is_free_phy)
1801 d40_config_write(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001802fail:
Linus Walleij8d318a52010-03-30 15:33:42 +02001803 spin_unlock_irqrestore(&d40c->lock, flags);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001804 return err;
Linus Walleij8d318a52010-03-30 15:33:42 +02001805}
1806
1807static void d40_free_chan_resources(struct dma_chan *chan)
1808{
1809 struct d40_chan *d40c =
1810 container_of(chan, struct d40_chan, chan);
1811 int err;
1812 unsigned long flags;
1813
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001814 if (d40c->phy_chan == NULL) {
1815 dev_err(&d40c->chan.dev->device,
1816 "[%s] Cannot free unallocated channel\n", __func__);
1817 return;
1818 }
1819
1820
Linus Walleij8d318a52010-03-30 15:33:42 +02001821 spin_lock_irqsave(&d40c->lock, flags);
1822
1823 err = d40_free_dma(d40c);
1824
1825 if (err)
1826 dev_err(&d40c->chan.dev->device,
1827 "[%s] Failed to free channel\n", __func__);
1828 spin_unlock_irqrestore(&d40c->lock, flags);
1829}
1830
1831static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1832 dma_addr_t dst,
1833 dma_addr_t src,
1834 size_t size,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001835 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001836{
1837 struct d40_desc *d40d;
1838 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1839 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001840 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001841 int err = 0;
1842
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001843 if (d40c->phy_chan == NULL) {
1844 dev_err(&d40c->chan.dev->device,
1845 "[%s] Channel is not allocated.\n", __func__);
1846 return ERR_PTR(-EINVAL);
1847 }
1848
Jonas Aaberg2a614342010-06-20 21:25:24 +00001849 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001850 d40d = d40_desc_get(d40c);
1851
1852 if (d40d == NULL) {
1853 dev_err(&d40c->chan.dev->device,
1854 "[%s] Descriptor is NULL\n", __func__);
1855 goto err;
1856 }
1857
Jonas Aaberg2a614342010-06-20 21:25:24 +00001858 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001859
1860 dma_async_tx_descriptor_init(&d40d->txd, chan);
1861
1862 d40d->txd.tx_submit = d40_tx_submit;
1863
1864 if (d40c->log_num != D40_PHY_CHAN) {
1865
1866 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1867 dev_err(&d40c->chan.dev->device,
1868 "[%s] Out of memory\n", __func__);
1869 goto err;
1870 }
1871 d40d->lli_len = 1;
Jonas Aaberg698e4732010-08-09 12:08:56 +00001872 d40d->lli_current = 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02001873
1874 d40_log_fill_lli(d40d->lli_log.src,
1875 src,
1876 size,
Linus Walleij8d318a52010-03-30 15:33:42 +02001877 d40c->log_def.lcsp1,
1878 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001879 true);
Linus Walleij8d318a52010-03-30 15:33:42 +02001880
1881 d40_log_fill_lli(d40d->lli_log.dst,
1882 dst,
1883 size,
Linus Walleij8d318a52010-03-30 15:33:42 +02001884 d40c->log_def.lcsp3,
1885 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001886 true);
Linus Walleij8d318a52010-03-30 15:33:42 +02001887
1888 } else {
1889
1890 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1891 dev_err(&d40c->chan.dev->device,
1892 "[%s] Out of memory\n", __func__);
1893 goto err;
1894 }
1895
1896 err = d40_phy_fill_lli(d40d->lli_phy.src,
1897 src,
1898 size,
1899 d40c->dma_cfg.src_info.psize,
1900 0,
1901 d40c->src_def_cfg,
1902 true,
1903 d40c->dma_cfg.src_info.data_width,
1904 false);
1905 if (err)
1906 goto err_fill_lli;
1907
1908 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1909 dst,
1910 size,
1911 d40c->dma_cfg.dst_info.psize,
1912 0,
1913 d40c->dst_def_cfg,
1914 true,
1915 d40c->dma_cfg.dst_info.data_width,
1916 false);
1917
1918 if (err)
1919 goto err_fill_lli;
1920
1921 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1922 d40d->lli_pool.size, DMA_TO_DEVICE);
1923 }
1924
Jonas Aaberg2a614342010-06-20 21:25:24 +00001925 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001926 return &d40d->txd;
1927
1928err_fill_lli:
1929 dev_err(&d40c->chan.dev->device,
1930 "[%s] Failed filling in PHY LLI\n", __func__);
1931 d40_pool_lli_free(d40d);
1932err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001933 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001934 return NULL;
1935}
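
/*
 * Example (illustrative sketch, not part of the driver): a single block copy
 * through the generic dmaengine entry point that ends up in d40_prep_memcpy()
 * above.  "chan", "dst_phys", "src_phys" and "size" are assumptions for the
 * example.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
 *						    size, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		txd->tx_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */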
1936
1937static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1938 struct d40_chan *d40c,
1939 struct scatterlist *sgl,
1940 unsigned int sg_len,
1941 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001942 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001943{
1944 dma_addr_t dev_addr = 0;
1945 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001946
1947 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1948 dev_err(&d40c->chan.dev->device,
1949 "[%s] Out of memory\n", __func__);
1950 return -ENOMEM;
1951 }
1952
1953 d40d->lli_len = sg_len;
Jonas Aaberg698e4732010-08-09 12:08:56 +00001954 d40d->lli_current = 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02001955
Jonas Aaberg2a614342010-06-20 21:25:24 +00001956 if (direction == DMA_FROM_DEVICE)
Linus Walleij95e14002010-08-04 13:37:45 +02001957 if (d40c->runtime_addr)
1958 dev_addr = d40c->runtime_addr;
1959 else
1960 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001961 else if (direction == DMA_TO_DEVICE)
Linus Walleij95e14002010-08-04 13:37:45 +02001962 if (d40c->runtime_addr)
1963 dev_addr = d40c->runtime_addr;
1964 else
1965 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1966
Jonas Aaberg2a614342010-06-20 21:25:24 +00001967 else
Linus Walleij8d318a52010-03-30 15:33:42 +02001968 return -EINVAL;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001969
Jonas Aaberg698e4732010-08-09 12:08:56 +00001970 total_size = d40_log_sg_to_dev(sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001971 &d40d->lli_log,
1972 &d40c->log_def,
1973 d40c->dma_cfg.src_info.data_width,
1974 d40c->dma_cfg.dst_info.data_width,
1975 direction,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001976 dev_addr);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001977
Linus Walleij8d318a52010-03-30 15:33:42 +02001978 if (total_size < 0)
1979 return -EINVAL;
1980
1981 return 0;
1982}
1983
1984static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1985 struct d40_chan *d40c,
1986 struct scatterlist *sgl,
1987 unsigned int sgl_len,
1988 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001989 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001990{
1991 dma_addr_t src_dev_addr;
1992 dma_addr_t dst_dev_addr;
1993 int res;
1994
1995 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1996 dev_err(&d40c->chan.dev->device,
1997 "[%s] Out of memory\n", __func__);
1998 return -ENOMEM;
1999 }
2000
2001 d40d->lli_len = sgl_len;
Jonas Aaberg698e4732010-08-09 12:08:56 +00002002 d40d->lli_current = 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02002003
2004 if (direction == DMA_FROM_DEVICE) {
2005 dst_dev_addr = 0;
Linus Walleij95e14002010-08-04 13:37:45 +02002006 if (d40c->runtime_addr)
2007 src_dev_addr = d40c->runtime_addr;
2008 else
2009 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Linus Walleij8d318a52010-03-30 15:33:42 +02002010 } else if (direction == DMA_TO_DEVICE) {
Linus Walleij95e14002010-08-04 13:37:45 +02002011 if (d40c->runtime_addr)
2012 dst_dev_addr = d40c->runtime_addr;
2013 else
2014 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Linus Walleij8d318a52010-03-30 15:33:42 +02002015 src_dev_addr = 0;
2016 } else
2017 return -EINVAL;
2018
2019 res = d40_phy_sg_to_lli(sgl,
2020 sgl_len,
2021 src_dev_addr,
2022 d40d->lli_phy.src,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00002023 virt_to_phys(d40d->lli_phy.src),
Linus Walleij8d318a52010-03-30 15:33:42 +02002024 d40c->src_def_cfg,
2025 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00002026 d40c->dma_cfg.src_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02002027 if (res < 0)
2028 return res;
2029
2030 res = d40_phy_sg_to_lli(sgl,
2031 sgl_len,
2032 dst_dev_addr,
2033 d40d->lli_phy.dst,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00002034 virt_to_phys(d40d->lli_phy.dst),
Linus Walleij8d318a52010-03-30 15:33:42 +02002035 d40c->dst_def_cfg,
2036 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00002037 d40c->dma_cfg.dst_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02002038 if (res < 0)
2039 return res;
2040
2041 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2042 d40d->lli_pool.size, DMA_TO_DEVICE);
2043 return 0;
2044}
2045
2046static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2047 struct scatterlist *sgl,
2048 unsigned int sg_len,
2049 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002050 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02002051{
2052 struct d40_desc *d40d;
2053 struct d40_chan *d40c = container_of(chan, struct d40_chan,
2054 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00002055 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002056 int err;
2057
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002058 if (d40c->phy_chan == NULL) {
2059 dev_err(&d40c->chan.dev->device,
2060 "[%s] Cannot prepare unallocated channel\n", __func__);
2061 return ERR_PTR(-EINVAL);
2062 }
2063
Linus Walleij8d318a52010-03-30 15:33:42 +02002064 if (d40c->dma_cfg.pre_transfer)
2065 d40c->dma_cfg.pre_transfer(chan,
2066 d40c->dma_cfg.pre_transfer_data,
2067 sg_dma_len(sgl));
2068
Jonas Aaberg2a614342010-06-20 21:25:24 +00002069 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002070 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00002071 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002072
2073 if (d40d == NULL)
2074 return NULL;
2075
Linus Walleij8d318a52010-03-30 15:33:42 +02002076 if (d40c->log_num != D40_PHY_CHAN)
2077 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002078 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002079 else
2080 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002081 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002082 if (err) {
2083 dev_err(&d40c->chan.dev->device,
2084 "[%s] Failed to prepare %s slave sg job: %d\n",
2085 __func__,
2086 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2087 return NULL;
2088 }
2089
Jonas Aaberg2a614342010-06-20 21:25:24 +00002090 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002091
2092 dma_async_tx_descriptor_init(&d40d->txd, chan);
2093
2094 d40d->txd.tx_submit = d40_tx_submit;
2095
2096 return &d40d->txd;
2097}
2098
2099static enum dma_status d40_tx_status(struct dma_chan *chan,
2100 dma_cookie_t cookie,
2101 struct dma_tx_state *txstate)
2102{
2103 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2104 dma_cookie_t last_used;
2105 dma_cookie_t last_complete;
2106 int ret;
2107
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002108 if (d40c->phy_chan == NULL) {
2109 dev_err(&d40c->chan.dev->device,
2110 "[%s] Cannot read status of unallocated channel\n",
2111 __func__);
2112 return -EINVAL;
2113 }
2114
Linus Walleij8d318a52010-03-30 15:33:42 +02002115 last_complete = d40c->completed;
2116 last_used = chan->cookie;
2117
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002118 if (d40_is_paused(d40c))
2119 ret = DMA_PAUSED;
2120 else
2121 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002122
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002123 dma_set_tx_state(txstate, last_complete, last_used,
2124 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002125
2126 return ret;
2127}
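
/*
 * Example (illustrative sketch, not part of the driver): polling a cookie
 * previously returned by tx_submit().  Note that d40_tx_status() above may
 * also report DMA_PAUSED for a paused channel.
 *
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	if (status == DMA_SUCCESS)
 *		...; /* transfer finished */
 */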
2128
2129static void d40_issue_pending(struct dma_chan *chan)
2130{
2131 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2132 unsigned long flags;
2133
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002134 if (d40c->phy_chan == NULL) {
2135 dev_err(&d40c->chan.dev->device,
2136 "[%s] Channel is not allocated!\n", __func__);
2137 return;
2138 }
2139
Linus Walleij8d318a52010-03-30 15:33:42 +02002140 spin_lock_irqsave(&d40c->lock, flags);
2141
2142 /* Busy means that pending jobs are already being processed */
2143 if (!d40c->busy)
2144 (void) d40_queue_start(d40c);
2145
2146 spin_unlock_irqrestore(&d40c->lock, flags);
2147}
2148
Linus Walleij95e14002010-08-04 13:37:45 +02002149/* Runtime reconfiguration extension */
2150static void d40_set_runtime_config(struct dma_chan *chan,
2151 struct dma_slave_config *config)
2152{
2153 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2154 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2155 enum dma_slave_buswidth config_addr_width;
2156 dma_addr_t config_addr;
2157 u32 config_maxburst;
2158 enum stedma40_periph_data_width addr_width;
2159 int psize;
2160
2161 if (config->direction == DMA_FROM_DEVICE) {
2162 dma_addr_t dev_addr_rx =
2163 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2164
2165 config_addr = config->src_addr;
2166 if (dev_addr_rx)
2167 dev_dbg(d40c->base->dev,
2168 "channel has a pre-wired RX address %08x "
2169 "overriding with %08x\n",
2170 dev_addr_rx, config_addr);
2171 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2172 dev_dbg(d40c->base->dev,
2173 "channel was not configured for peripheral "
2174 "to memory transfer (%d) overriding\n",
2175 cfg->dir);
2176 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2177
2178 config_addr_width = config->src_addr_width;
2179 config_maxburst = config->src_maxburst;
2180
2181 } else if (config->direction == DMA_TO_DEVICE) {
2182 dma_addr_t dev_addr_tx =
2183 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2184
2185 config_addr = config->dst_addr;
2186 if (dev_addr_tx)
2187 dev_dbg(d40c->base->dev,
2188 "channel has a pre-wired TX address %08x "
2189 "overriding with %08x\n",
2190 dev_addr_tx, config_addr);
2191 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2192 dev_dbg(d40c->base->dev,
2193 "channel was not configured for memory "
2194 "to peripheral transfer (%d) overriding\n",
2195 cfg->dir);
2196 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2197
2198 config_addr_width = config->dst_addr_width;
2199 config_maxburst = config->dst_maxburst;
2200
2201 } else {
2202 dev_err(d40c->base->dev,
2203 "unrecognized channel direction %d\n",
2204 config->direction);
2205 return;
2206 }
2207
2208 switch (config_addr_width) {
2209 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2210 addr_width = STEDMA40_BYTE_WIDTH;
2211 break;
2212 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2213 addr_width = STEDMA40_HALFWORD_WIDTH;
2214 break;
2215 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2216 addr_width = STEDMA40_WORD_WIDTH;
2217 break;
2218 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2219 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2220 break;
2221 default:
2222 dev_err(d40c->base->dev,
2223 "illegal peripheral address width "
2224 "requested (%d)\n",
2225 config_addr_width);
2226 return;
2227 }
2228
2229 if (config_maxburst >= 16)
2230 psize = STEDMA40_PSIZE_LOG_16;
2231 else if (config_maxburst >= 8)
2232 psize = STEDMA40_PSIZE_LOG_8;
2233 else if (config_maxburst >= 4)
2234 psize = STEDMA40_PSIZE_LOG_4;
2235 else
2236 psize = STEDMA40_PSIZE_LOG_1;
2237
2238 /* Set up all the endpoint configs */
2239 cfg->src_info.data_width = addr_width;
2240 cfg->src_info.psize = psize;
2241 cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
2242 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2243 cfg->dst_info.data_width = addr_width;
2244 cfg->dst_info.psize = psize;
2245 cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
2246 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2247
2248 /* These settings will take precedence later */
2249 d40c->runtime_addr = config_addr;
2250 d40c->runtime_direction = config->direction;
2251 dev_dbg(d40c->base->dev,
2252 "configured channel %s for %s, data width %d, "
2253 "maxburst %d bytes, LE, no flow control\n",
2254 dma_chan_name(chan),
2255 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2256 config_addr_width,
2257 config_maxburst);
2258}
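
/*
 * Worked example of the mapping above (an illustration only): a client that
 * requests src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES and src_maxburst = 8
 * for a DMA_FROM_DEVICE channel ends up with STEDMA40_WORD_WIDTH endpoints
 * and STEDMA40_PSIZE_LOG_8 bursts on both the source and destination side.
 */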
2259
Linus Walleij05827632010-05-17 16:30:42 -07002260static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2261 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002262{
2263 unsigned long flags;
2264 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2265
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002266 if (d40c->phy_chan == NULL) {
2267 dev_err(&d40c->chan.dev->device,
2268 "[%s] Channel is not allocated!\n", __func__);
2269 return -EINVAL;
2270 }
2271
Linus Walleij8d318a52010-03-30 15:33:42 +02002272 switch (cmd) {
2273 case DMA_TERMINATE_ALL:
2274 spin_lock_irqsave(&d40c->lock, flags);
2275 d40_term_all(d40c);
2276 spin_unlock_irqrestore(&d40c->lock, flags);
2277 return 0;
2278 case DMA_PAUSE:
2279 return d40_pause(chan);
2280 case DMA_RESUME:
2281 return d40_resume(chan);
Linus Walleij95e14002010-08-04 13:37:45 +02002282 case DMA_SLAVE_CONFIG:
2283 d40_set_runtime_config(chan,
2284 (struct dma_slave_config *) arg);
2285 return 0;
2286 default:
2287 break;
Linus Walleij8d318a52010-03-30 15:33:42 +02002288 }
2289
2290 /* Other commands are unimplemented */
2291 return -ENXIO;
2292}
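
/*
 * Example (illustrative sketch, not part of the driver): how a client
 * reaches d40_control() above.  The address and register values in
 * "slave_config" are assumptions made up for the example.
 *
 *	struct dma_slave_config slave_config = {
 *		.direction	= DMA_TO_DEVICE,
 *		.dst_addr	= dev_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &slave_config);
 *	...
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 */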
2293
2294/* Initialization functions */
2295
2296static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2297 struct d40_chan *chans, int offset,
2298 int num_chans)
2299{
2300 int i = 0;
2301 struct d40_chan *d40c;
2302
2303 INIT_LIST_HEAD(&dma->channels);
2304
2305 for (i = offset; i < offset + num_chans; i++) {
2306 d40c = &chans[i];
2307 d40c->base = base;
2308 d40c->chan.device = dma;
2309
Linus Walleij8d318a52010-03-30 15:33:42 +02002310 spin_lock_init(&d40c->lock);
2311
2312 d40c->log_num = D40_PHY_CHAN;
2313
Linus Walleij8d318a52010-03-30 15:33:42 +02002314 INIT_LIST_HEAD(&d40c->active);
2315 INIT_LIST_HEAD(&d40c->queue);
2316 INIT_LIST_HEAD(&d40c->client);
2317
Linus Walleij8d318a52010-03-30 15:33:42 +02002318 tasklet_init(&d40c->tasklet, dma_tasklet,
2319 (unsigned long) d40c);
2320
2321 list_add_tail(&d40c->chan.device_node,
2322 &dma->channels);
2323 }
2324}
2325
2326static int __init d40_dmaengine_init(struct d40_base *base,
2327 int num_reserved_chans)
2328{
2329 int err;
2330
2331 d40_chan_init(base, &base->dma_slave, base->log_chans,
2332 0, base->num_log_chans);
2333
2334 dma_cap_zero(base->dma_slave.cap_mask);
2335 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2336
2337 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2338 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2339 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2340 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2341 base->dma_slave.device_tx_status = d40_tx_status;
2342 base->dma_slave.device_issue_pending = d40_issue_pending;
2343 base->dma_slave.device_control = d40_control;
2344 base->dma_slave.dev = base->dev;
2345
2346 err = dma_async_device_register(&base->dma_slave);
2347
2348 if (err) {
2349 dev_err(base->dev,
2350 "[%s] Failed to register slave channels\n",
2351 __func__);
2352 goto failure1;
2353 }
2354
2355 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2356 base->num_log_chans, base->plat_data->memcpy_len);
2357
2358 dma_cap_zero(base->dma_memcpy.cap_mask);
2359 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2360
2361 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2362 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2363 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2364 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2365 base->dma_memcpy.device_tx_status = d40_tx_status;
2366 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2367 base->dma_memcpy.device_control = d40_control;
2368 base->dma_memcpy.dev = base->dev;
2369 /*
2370 * This controller can only access addresses at even
2371 * 32 bit boundaries, i.e. 2^2
2372 */
2373 base->dma_memcpy.copy_align = 2;
2374
2375 err = dma_async_device_register(&base->dma_memcpy);
2376
2377 if (err) {
2378 dev_err(base->dev,
2379 "[%s] Failed to regsiter memcpy only channels\n",
2380 __func__);
2381 goto failure2;
2382 }
2383
2384 d40_chan_init(base, &base->dma_both, base->phy_chans,
2385 0, num_reserved_chans);
2386
2387 dma_cap_zero(base->dma_both.cap_mask);
2388 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2389 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2390
2391 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2392 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2393 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2394 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2395 base->dma_both.device_tx_status = d40_tx_status;
2396 base->dma_both.device_issue_pending = d40_issue_pending;
2397 base->dma_both.device_control = d40_control;
2398 base->dma_both.dev = base->dev;
2399 base->dma_both.copy_align = 2;
2400 err = dma_async_device_register(&base->dma_both);
2401
2402 if (err) {
2403 dev_err(base->dev,
2404 "[%s] Failed to register logical and physical capable channels\n",
2405 __func__);
2406 goto failure3;
2407 }
2408 return 0;
2409failure3:
2410 dma_async_device_unregister(&base->dma_memcpy);
2411failure2:
2412 dma_async_device_unregister(&base->dma_slave);
2413failure1:
2414 return err;
2415}
2416
2417/* Initialization functions. */
2418
2419static int __init d40_phy_res_init(struct d40_base *base)
2420{
2421 int i;
2422 int num_phy_chans_avail = 0;
2423 u32 val[2];
2424 int odd_even_bit = -2;
2425
2426 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2427 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2428
2429 for (i = 0; i < base->num_phy_chans; i++) {
2430 base->phy_res[i].num = i;
2431 odd_even_bit += 2 * ((i % 2) == 0);
2432 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2433 /* Mark security only channels as occupied */
2434 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2435 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2436 } else {
2437 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2438 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2439 num_phy_chans_avail++;
2440 }
2441 spin_lock_init(&base->phy_res[i].lock);
2442 }
Jonas Aaberg6b7acd82010-06-20 21:26:59 +00002443
2444 /* Mark disabled channels as occupied */
2445 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
 int chan = base->plat_data->disabled_channels[i];
2446 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2447 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2448 num_phy_chans_avail--;
2449 }
2450
Linus Walleij8d318a52010-03-30 15:33:42 +02002451 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2452 num_phy_chans_avail, base->num_phy_chans);
2453
2454 /* Verify settings extended vs standard */
2455 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2456
2457 for (i = 0; i < base->num_phy_chans; i++) {
2458
2459 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2460 (val[0] & 0x3) != 1)
2461 dev_info(base->dev,
2462 "[%s] INFO: channel %d is misconfigured (%d)\n",
2463 __func__, i, val[0] & 0x3);
2464
2465 val[0] = val[0] >> 2;
2466 }
2467
2468 return num_phy_chans_avail;
2469}
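
/*
 * Reading aid for d40_phy_res_init() above (descriptive only): the secure
 * mode of each physical channel is encoded with two bits, even-numbered
 * channels in D40_DREG_PRSME and odd-numbered channels in D40_DREG_PRSMO.
 * Channels 0 and 1 thus occupy bits 1:0 of their respective registers,
 * channels 2 and 3 bits 3:2, and so on; a field value of 1 means the
 * channel is reserved for security-only use and is marked as occupied.
 */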
2470
2471static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2472{
2473 static const struct d40_reg_val dma_id_regs[] = {
2474 /* Peripheral Id */
2475 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2476 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2477 /*
2478 * D40_DREG_PERIPHID2 Depends on HW revision:
2479 * MOP500/HREF ED has 0x0008,
2480 * ? has 0x0018,
2481 * HREF V1 has 0x0028
2482 */
2483 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2484
2485 /* PCell Id */
2486 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2487 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2488 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2489 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2490 };
2491 struct stedma40_platform_data *plat_data;
2492 struct clk *clk = NULL;
2493 void __iomem *virtbase = NULL;
2494 struct resource *res = NULL;
2495 struct d40_base *base = NULL;
2496 int num_log_chans = 0;
2497 int num_phy_chans;
2498 int i;
Linus Walleijf4185592010-06-22 18:06:42 -07002499 u32 val;
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002500 u32 rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02002501
2502 clk = clk_get(&pdev->dev, NULL);
2503
2504 if (IS_ERR(clk)) {
2505 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2506 __func__);
2507 goto failure;
2508 }
2509
2510 clk_enable(clk);
2511
2512 /* Get IO for DMAC base address */
2513 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2514 if (!res)
2515 goto failure;
2516
2517 if (request_mem_region(res->start, resource_size(res),
2518 D40_NAME " I/O base") == NULL)
2519 goto failure;
2520
2521 virtbase = ioremap(res->start, resource_size(res));
2522 if (!virtbase)
2523 goto failure;
2524
2525 /* HW version check */
2526 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2527 if (dma_id_regs[i].val !=
2528 readl(virtbase + dma_id_regs[i].reg)) {
2529 dev_err(&pdev->dev,
2530 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2531 __func__,
2532 dma_id_regs[i].val,
2533 dma_id_regs[i].reg,
2534 readl(virtbase + dma_id_regs[i].reg));
2535 goto failure;
2536 }
2537 }
2538
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002539 /* Get silicon revision and designer */
Linus Walleijf4185592010-06-22 18:06:42 -07002540 val = readl(virtbase + D40_DREG_PERIPHID2);
Linus Walleij8d318a52010-03-30 15:33:42 +02002541
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002542 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2543 D40_HW_DESIGNER) {
Linus Walleij8d318a52010-03-30 15:33:42 +02002544 dev_err(&pdev->dev,
2545 "[%s] Unknown designer! Got %x wanted %x\n",
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002546 __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2547 D40_HW_DESIGNER);
Linus Walleij8d318a52010-03-30 15:33:42 +02002548 goto failure;
2549 }
2550
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002551 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
2552 D40_DREG_PERIPHID2_REV_POS;
2553
Linus Walleij8d318a52010-03-30 15:33:42 +02002554 /* The number of physical channels on this HW */
2555 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2556
2557 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002558 rev, res->start);
Linus Walleij8d318a52010-03-30 15:33:42 +02002559
2560 plat_data = pdev->dev.platform_data;
2561
2562 /* Count the number of logical channels in use */
2563 for (i = 0; i < plat_data->dev_len; i++)
2564 if (plat_data->dev_rx[i] != 0)
2565 num_log_chans++;
2566
2567 for (i = 0; i < plat_data->dev_len; i++)
2568 if (plat_data->dev_tx[i] != 0)
2569 num_log_chans++;
2570
2571 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2572 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2573 sizeof(struct d40_chan), GFP_KERNEL);
2574
2575 if (base == NULL) {
2576 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2577 goto failure;
2578 }
2579
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002580 base->rev = rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02002581 base->clk = clk;
2582 base->num_phy_chans = num_phy_chans;
2583 base->num_log_chans = num_log_chans;
2584 base->phy_start = res->start;
2585 base->phy_size = resource_size(res);
2586 base->virtbase = virtbase;
2587 base->plat_data = plat_data;
2588 base->dev = &pdev->dev;
2589 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2590 base->log_chans = &base->phy_chans[num_phy_chans];
2591
2592 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2593 GFP_KERNEL);
2594 if (!base->phy_res)
2595 goto failure;
2596
2597 base->lookup_phy_chans = kzalloc(num_phy_chans *
2598 sizeof(struct d40_chan *),
2599 GFP_KERNEL);
2600 if (!base->lookup_phy_chans)
2601 goto failure;
2602
2603 if (num_log_chans + plat_data->memcpy_len) {
2604 /*
2605 * The max number of logical channels equals the number of event lines
2606 * for all src devices plus all dst devices
2607 */
2608 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2609 sizeof(struct d40_chan *),
2610 GFP_KERNEL);
2611 if (!base->lookup_log_chans)
2612 goto failure;
2613 }
Jonas Aaberg698e4732010-08-09 12:08:56 +00002614
2615 base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2616 sizeof(struct d40_desc *) *
2617 D40_LCLA_LINK_PER_EVENT_GRP,
Linus Walleij8d318a52010-03-30 15:33:42 +02002618 GFP_KERNEL);
2619 if (!base->lcla_pool.alloc_map)
2620 goto failure;
2621
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002622 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2623 0, SLAB_HWCACHE_ALIGN,
2624 NULL);
2625 if (base->desc_slab == NULL)
2626 goto failure;
2627
Linus Walleij8d318a52010-03-30 15:33:42 +02002628 return base;
2629
2630failure:
2631 if (clk) {
2632 clk_disable(clk);
2633 clk_put(clk);
2634 }
2635 if (virtbase)
2636 iounmap(virtbase);
2637 if (res)
2638 release_mem_region(res->start,
2639 resource_size(res));
2642
2643 if (base) {
2644 kfree(base->lcla_pool.alloc_map);
2645 kfree(base->lookup_log_chans);
2646 kfree(base->lookup_phy_chans);
2647 kfree(base->phy_res);
2648 kfree(base);
2649 }
2650
2651 return NULL;
2652}
2653
2654static void __init d40_hw_init(struct d40_base *base)
2655{
2656
2657 static const struct d40_reg_val dma_init_reg[] = {
2658 /* Clock every part of the DMA block from start */
2659 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2660
2661 /* Interrupts on all logical channels */
2662 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2663 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2664 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2665 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2666 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2667 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2668 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2669 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2670 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2671 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2672 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2673 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2674 };
2675 int i;
2676 u32 prmseo[2] = {0, 0};
2677 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2678 u32 pcmis = 0;
2679 u32 pcicr = 0;
2680
2681 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2682 writel(dma_init_reg[i].val,
2683 base->virtbase + dma_init_reg[i].reg);
2684
2685 /* Configure all our dma channels to default settings */
2686 for (i = 0; i < base->num_phy_chans; i++) {
2687
2688 activeo[i % 2] = activeo[i % 2] << 2;
2689
2690 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2691 == D40_ALLOC_PHY) {
2692 activeo[i % 2] |= 3;
2693 continue;
2694 }
2695
2696 /* Enable interrupt # */
2697 pcmis = (pcmis << 1) | 1;
2698
2699 /* Clear interrupt # */
2700 pcicr = (pcicr << 1) | 1;
2701
2702 /* Set channel to physical mode */
2703 prmseo[i % 2] = prmseo[i % 2] << 2;
2704 prmseo[i % 2] |= 1;
2705
2706 }
2707
2708 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2709 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2710 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2711 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2712
2713 /* Write which interrupt to enable */
2714 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2715
2716 /* Write which interrupt to clear */
2717 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2718
2719}
2720
Linus Walleij508849a2010-06-20 21:26:07 +00002721static int __init d40_lcla_allocate(struct d40_base *base)
2722{
2723 unsigned long *page_list;
2724 int i, j;
2725 int ret = 0;
2726
2727 /*
2728 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
2729 * To fulfill this hardware requirement without wasting 256 kb,
2730 * we allocate pages until we get an aligned one.
2731 */
2732 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2733 GFP_KERNEL);
2734
2735 if (!page_list) {
2736 ret = -ENOMEM;
2737 goto failure;
2738 }
2739
2740 /* Calculate how many pages are required */
2741 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2742
2743 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2744 page_list[i] = __get_free_pages(GFP_KERNEL,
2745 base->lcla_pool.pages);
2746 if (!page_list[i]) {
2747
2748 dev_err(base->dev,
2749 "[%s] Failed to allocate %d pages.\n",
2750 __func__, base->lcla_pool.pages);
2751
2752 for (j = 0; j < i; j++)
2753 free_pages(page_list[j], base->lcla_pool.pages);
2754 goto failure;
2755 }
2756
2757 if ((virt_to_phys((void *)page_list[i]) &
2758 (LCLA_ALIGNMENT - 1)) == 0)
2759 break;
2760 }
2761
2762 for (j = 0; j < i; j++)
2763 free_pages(page_list[j], base->lcla_pool.pages);
2764
2765 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2766 base->lcla_pool.base = (void *)page_list[i];
2767 } else {
Jonas Aaberg767a9672010-08-09 12:08:34 +00002768 /*
2769 * After many attempts without finding the correct
2770 * alignment, fall back to allocating a big buffer.
2771 */
Linus Walleij508849a2010-06-20 21:26:07 +00002772 dev_warn(base->dev,
2773 "[%s] Failed to get %d pages @ 18 bit align.\n",
2774 __func__, base->lcla_pool.pages);
2775 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2776 base->num_phy_chans +
2777 LCLA_ALIGNMENT,
2778 GFP_KERNEL);
2779 if (!base->lcla_pool.base_unaligned) {
2780 ret = -ENOMEM;
2781 goto failure;
2782 }
2783
2784 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2785 LCLA_ALIGNMENT);
2786 }
2787
2788 writel(virt_to_phys(base->lcla_pool.base),
2789 base->virtbase + D40_DREG_LCLA);
2790failure:
2791 kfree(page_list);
2792 return ret;
2793}
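
/*
 * Worked example for the allocation above (illustration only, assuming 8
 * physical channels and 4 KiB pages): the LCLA pool itself is only
 * SZ_1K * 8 = 8 KiB, but its start address must be aligned to
 * LCLA_ALIGNMENT (2^18 bytes, i.e. 256 KiB).  Instead of permanently
 * reserving a 256 KiB aligned region, the loop above retries
 * __get_free_pages() until a block happens to start on such a boundary;
 * only the kmalloc() fallback pays for the full extra LCLA_ALIGNMENT bytes.
 */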
2794
Linus Walleij8d318a52010-03-30 15:33:42 +02002795static int __init d40_probe(struct platform_device *pdev)
2796{
2797 int err;
2798 int ret = -ENOENT;
2799 struct d40_base *base;
2800 struct resource *res = NULL;
2801 int num_reserved_chans;
2802 u32 val;
2803
2804 base = d40_hw_detect_init(pdev);
2805
2806 if (!base)
2807 goto failure;
2808
2809 num_reserved_chans = d40_phy_res_init(base);
2810
2811 platform_set_drvdata(pdev, base);
2812
2813 spin_lock_init(&base->interrupt_lock);
2814 spin_lock_init(&base->execmd_lock);
2815
2816 /* Get IO for logical channel parameter address */
2817 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2818 if (!res) {
2819 ret = -ENOENT;
2820 dev_err(&pdev->dev,
2821 "[%s] No \"lcpa\" memory resource\n",
2822 __func__);
2823 goto failure;
2824 }
2825 base->lcpa_size = resource_size(res);
2826 base->phy_lcpa = res->start;
2827
2828 if (request_mem_region(res->start, resource_size(res),
2829 D40_NAME " I/O lcpa") == NULL) {
2830 ret = -EBUSY;
2831 dev_err(&pdev->dev,
2832 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2833 __func__, res->start, res->end);
2834 goto failure;
2835 }
2836
2837 /* We make use of ESRAM memory for this. */
2838 val = readl(base->virtbase + D40_DREG_LCPA);
2839 if (res->start != val && val != 0) {
2840 dev_warn(&pdev->dev,
2841 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2842 __func__, val, res->start);
2843 } else
2844 writel(res->start, base->virtbase + D40_DREG_LCPA);
2845
2846 base->lcpa_base = ioremap(res->start, resource_size(res));
2847 if (!base->lcpa_base) {
2848 ret = -ENOMEM;
2849 dev_err(&pdev->dev,
2850 "[%s] Failed to ioremap LCPA region\n",
2851 __func__);
2852 goto failure;
2853 }
Linus Walleij508849a2010-06-20 21:26:07 +00002854
2855 ret = d40_lcla_allocate(base);
2856 if (ret) {
2857 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
Linus Walleij8d318a52010-03-30 15:33:42 +02002858 __func__);
2859 goto failure;
2860 }
2861
Linus Walleij8d318a52010-03-30 15:33:42 +02002862 spin_lock_init(&base->lcla_pool.lock);
2863
Linus Walleij8d318a52010-03-30 15:33:42 +02002864 base->irq = platform_get_irq(pdev, 0);
2865
2866 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2867
2868 if (ret) {
2869 dev_err(&pdev->dev, "[%s] Failed to request IRQ\n", __func__);
2870 goto failure;
2871 }
2872
2873 err = d40_dmaengine_init(base, num_reserved_chans);
2874 if (err)
2875 goto failure;
2876
2877 d40_hw_init(base);
2878
2879 dev_info(base->dev, "initialized\n");
2880 return 0;
2881
2882failure:
2883 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002884 if (base->desc_slab)
2885 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002886 if (base->virtbase)
2887 iounmap(base->virtbase);
Linus Walleij508849a2010-06-20 21:26:07 +00002888 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2889 free_pages((unsigned long)base->lcla_pool.base,
2890 base->lcla_pool.pages);
Jonas Aaberg767a9672010-08-09 12:08:34 +00002891
2892 kfree(base->lcla_pool.base_unaligned);
2893
Linus Walleij8d318a52010-03-30 15:33:42 +02002894 if (base->phy_lcpa)
2895 release_mem_region(base->phy_lcpa,
2896 base->lcpa_size);
2897 if (base->phy_start)
2898 release_mem_region(base->phy_start,
2899 base->phy_size);
2900 if (base->clk) {
2901 clk_disable(base->clk);
2902 clk_put(base->clk);
2903 }
2904
2905 kfree(base->lcla_pool.alloc_map);
2906 kfree(base->lookup_log_chans);
2907 kfree(base->lookup_phy_chans);
2908 kfree(base->phy_res);
2909 kfree(base);
2910 }
2911
2912 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2913 return ret;
2914}
2915
2916static struct platform_driver d40_driver = {
2917 .driver = {
2918 .owner = THIS_MODULE,
2919 .name = D40_NAME,
2920 },
2921};
2922
2923int __init stedma40_init(void)
2924{
2925 return platform_driver_probe(&d40_driver, d40_probe);
2926}
2927arch_initcall(stedma40_init);