Linus Walleij8d318a52010-03-30 15:33:42 +02001/*
Jonas Aaberg767a9672010-08-09 12:08:34 +00002 * Copyright (C) ST-Ericsson SA 2007-2010
3 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
Linus Walleij8d318a52010-03-30 15:33:42 +02005 * License terms: GNU General Public License (GPL) version 2
Linus Walleij8d318a52010-03-30 15:33:42 +02006 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/dmaengine.h>
11#include <linux/platform_device.h>
12#include <linux/clk.h>
13#include <linux/delay.h>
Jonas Aaberg698e4732010-08-09 12:08:56 +000014#include <linux/err.h>
Linus Walleij8d318a52010-03-30 15:33:42 +020015
16#include <plat/ste_dma40.h>
17
18#include "ste_dma40_ll.h"
19
20#define D40_NAME "dma40"
21
22#define D40_PHY_CHAN -1
23
24/* For masking out/in 2 bit channel positions */
25#define D40_CHAN_POS(chan) (2 * (chan / 2))
26#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
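/*
 * Note: each 32-bit ACTIVE/ACTIVO style register appears to hold one 2-bit
 * field per channel pair; even-numbered physical channels are accessed via
 * one register and odd-numbered ones via its sibling, so channels 2n and
 * 2n+1 share the same bit position (see d40_channel_execute_command()).
 */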
27
28/* Maximum iterations taken before giving up suspending a channel */
29#define D40_SUSPEND_MAX_IT 500
30
Linus Walleij508849a2010-06-20 21:26:07 +000031/* Hardware requirement on LCLA alignment */
32#define LCLA_ALIGNMENT 0x40000
Jonas Aaberg698e4732010-08-09 12:08:56 +000033
34/* Max number of links per event group */
35#define D40_LCLA_LINK_PER_EVENT_GRP 128
36#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
37
Linus Walleij508849a2010-06-20 21:26:07 +000038/* Attempts before giving up trying to get pages that are aligned */
39#define MAX_LCLA_ALLOC_ATTEMPTS 256
40
41/* Bit markings for allocation map */
Linus Walleij8d318a52010-03-30 15:33:42 +020042#define D40_ALLOC_FREE (1 << 31)
43#define D40_ALLOC_PHY (1 << 30)
44#define D40_ALLOC_LOG_FREE 0
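/*
 * Rough meaning of the allocation map values as used below: bit 31 marks a
 * fully free channel, bit 30 marks a channel taken for physical use, and the
 * low bits record one logical event line each (see d40_alloc_mask_set()).
 */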
45
Linus Walleij8d318a52010-03-30 15:33:42 +020046/* Hardware designer of the block */
Jonas Aaberg3ae02672010-08-09 12:08:18 +000047#define D40_HW_DESIGNER 0x8
Linus Walleij8d318a52010-03-30 15:33:42 +020048
49/**
 50 * enum d40_command - The different commands and/or statuses.
51 *
52 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 53 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
54 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
55 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
56 */
57enum d40_command {
58 D40_DMA_STOP = 0,
59 D40_DMA_RUN = 1,
60 D40_DMA_SUSPEND_REQ = 2,
61 D40_DMA_SUSPENDED = 3
62};
63
64/**
65 * struct d40_lli_pool - Structure for keeping LLIs in memory
66 *
67 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 68 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
69 * pre_alloc_lli is used.
70 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
71 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
72 * one buffer to one buffer.
73 */
74struct d40_lli_pool {
75 void *base;
Linus Walleij508849a2010-06-20 21:26:07 +000076 int size;
Linus Walleij8d318a52010-03-30 15:33:42 +020077 /* Space for dst and src, plus an extra for padding */
Linus Walleij508849a2010-06-20 21:26:07 +000078 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
Linus Walleij8d318a52010-03-30 15:33:42 +020079};
80
81/**
82 * struct d40_desc - A descriptor is one DMA job.
83 *
 84 * @lli_phy: LLI settings for physical channel. Both src and dst
 85 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
86 * lli_len equals one.
87 * @lli_log: Same as above but for logical channels.
88 * @lli_pool: The pool with two entries pre-allocated.
Per Friden941b77a2010-06-20 21:24:45 +000089 * @lli_len: Number of llis of current descriptor.
Jonas Aaberg698e4732010-08-09 12:08:56 +000090 * @lli_current: Number of transferred llis.
91 * @lcla_alloc: Number of LCLA entries allocated.
Linus Walleij8d318a52010-03-30 15:33:42 +020092 * @txd: DMA engine struct. Used, among other things, for communication
93 * during a transfer.
94 * @node: List entry.
Linus Walleij8d318a52010-03-30 15:33:42 +020095 * @is_in_client_list: true if the client owns this descriptor.
Jonas Aabergaa182ae2010-08-09 12:08:26 +000096 * @is_hw_linked: true if this job will automatically be continued after
97 * the previous one.
Linus Walleij8d318a52010-03-30 15:33:42 +020098 *
99 * This descriptor is used for both logical and physical transfers.
100 */
Linus Walleij8d318a52010-03-30 15:33:42 +0200101struct d40_desc {
102 /* LLI physical */
103 struct d40_phy_lli_bidir lli_phy;
104 /* LLI logical */
105 struct d40_log_lli_bidir lli_log;
106
107 struct d40_lli_pool lli_pool;
Per Friden941b77a2010-06-20 21:24:45 +0000108 int lli_len;
Jonas Aaberg698e4732010-08-09 12:08:56 +0000109 int lli_current;
110 int lcla_alloc;
Linus Walleij8d318a52010-03-30 15:33:42 +0200111
112 struct dma_async_tx_descriptor txd;
113 struct list_head node;
114
Linus Walleij8d318a52010-03-30 15:33:42 +0200115 bool is_in_client_list;
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000116 bool is_hw_linked;
Linus Walleij8d318a52010-03-30 15:33:42 +0200117};
118
119/**
120 * struct d40_lcla_pool - LCLA pool settings and data.
121 *
Linus Walleij508849a2010-06-20 21:26:07 +0000122 * @base: The virtual address of LCLA. 18 bit aligned.
 123 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
124 * This pointer is only there for clean-up on error.
125 * @pages: The number of pages needed for all physical channels.
126 * Only used later for clean-up on error
Linus Walleij8d318a52010-03-30 15:33:42 +0200127 * @lock: Lock to protect the content in this struct.
Jonas Aaberg698e4732010-08-09 12:08:56 +0000128 * @alloc_map: map of which LCLA entry is owned by which job.
Linus Walleij8d318a52010-03-30 15:33:42 +0200129 */
130struct d40_lcla_pool {
131 void *base;
Linus Walleij508849a2010-06-20 21:26:07 +0000132 void *base_unaligned;
133 int pages;
Linus Walleij8d318a52010-03-30 15:33:42 +0200134 spinlock_t lock;
Jonas Aaberg698e4732010-08-09 12:08:56 +0000135 struct d40_desc **alloc_map;
Linus Walleij8d318a52010-03-30 15:33:42 +0200136};
137
138/**
139 * struct d40_phy_res - struct for handling eventlines mapped to physical
140 * channels.
141 *
 142 * @lock: A lock protecting this entity.
143 * @num: The physical channel number of this entity.
 144 * @allocated_src: Bit map showing which src event lines are mapped to
145 * this physical channel. Can also be free or physically allocated.
 146 * @allocated_dst: Same as for src but for dst.
 147 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
Jonas Aaberg767a9672010-08-09 12:08:34 +0000148 * the event line number.
Linus Walleij8d318a52010-03-30 15:33:42 +0200149 */
150struct d40_phy_res {
151 spinlock_t lock;
152 int num;
153 u32 allocated_src;
154 u32 allocated_dst;
155};
156
157struct d40_base;
158
159/**
160 * struct d40_chan - Struct that describes a channel.
161 *
162 * @lock: A spinlock to protect this struct.
 163 * @log_num: The logical number, if any, of this channel.
 164 * @completed: Starts with 1; after the first interrupt it is set to the dma engine's
165 * current cookie.
166 * @pending_tx: The number of pending transfers. Used between interrupt handler
167 * and tasklet.
168 * @busy: Set to true when transfer is ongoing on this channel.
Jonas Aaberg2a614342010-06-20 21:25:24 +0000169 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 170 * pointer is NULL, then the channel is not allocated.
Linus Walleij8d318a52010-03-30 15:33:42 +0200171 * @chan: DMA engine handle.
172 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
173 * transfer and call client callback.
 174 * @client: Client owned descriptor list.
175 * @active: Active descriptor.
176 * @queue: Queued jobs.
Linus Walleij8d318a52010-03-30 15:33:42 +0200177 * @dma_cfg: The client configuration of this dma channel.
178 * @base: Pointer to the device instance struct.
179 * @src_def_cfg: Default cfg register setting for src.
180 * @dst_def_cfg: Default cfg register setting for dst.
181 * @log_def: Default logical channel settings.
182 * @lcla: Space for one dst src pair for logical channel transfers.
183 * @lcpa: Pointer to dst and src lcpa settings.
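 * @runtime_addr: Device address set by runtime reconfiguration, if any.
 * @runtime_direction: Transfer direction set by runtime reconfiguration.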
184 *
185 * This struct can either "be" a logical or a physical channel.
186 */
187struct d40_chan {
188 spinlock_t lock;
189 int log_num;
190 /* ID of the most recent completed transfer */
191 int completed;
192 int pending_tx;
193 bool busy;
194 struct d40_phy_res *phy_chan;
195 struct dma_chan chan;
196 struct tasklet_struct tasklet;
197 struct list_head client;
198 struct list_head active;
199 struct list_head queue;
Linus Walleij8d318a52010-03-30 15:33:42 +0200200 struct stedma40_chan_cfg dma_cfg;
201 struct d40_base *base;
202 /* Default register configurations */
203 u32 src_def_cfg;
204 u32 dst_def_cfg;
205 struct d40_def_lcsp log_def;
Linus Walleij8d318a52010-03-30 15:33:42 +0200206 struct d40_log_lli_full *lcpa;
Linus Walleij95e14002010-08-04 13:37:45 +0200207 /* Runtime reconfiguration */
208 dma_addr_t runtime_addr;
209 enum dma_data_direction runtime_direction;
Linus Walleij8d318a52010-03-30 15:33:42 +0200210};
211
212/**
213 * struct d40_base - The big global struct, one for each probe'd instance.
214 *
 215 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
216 * @execmd_lock: Lock for execute command usage since several channels share
217 * the same physical register.
218 * @dev: The device structure.
 219 * @virtbase: The virtual base address of the DMA's registers.
Linus Walleijf4185592010-06-22 18:06:42 -0700220 * @rev: silicon revision detected.
Linus Walleij8d318a52010-03-30 15:33:42 +0200221 * @clk: Pointer to the DMA clock structure.
222 * @phy_start: Physical memory start of the DMA registers.
223 * @phy_size: Size of the DMA register map.
224 * @irq: The IRQ number.
225 * @num_phy_chans: The number of physical channels. Read from HW. This
226 * is the number of available channels for this driver, not counting "Secure
227 * mode" allocated physical channels.
228 * @num_log_chans: The number of logical channels. Calculated from
229 * num_phy_chans.
230 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 231 * @dma_slave: dma_device channels that can only do slave transfers.
 232 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
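 * @phy_chans: Room for all possible physical channels in system.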
Linus Walleij8d318a52010-03-30 15:33:42 +0200233 * @log_chans: Room for all possible logical channels in system.
234 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
235 * to log_chans entries.
236 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
237 * to phy_chans entries.
238 * @plat_data: Pointer to provided platform_data which is the driver
239 * configuration.
240 * @phy_res: Vector containing all physical channels.
241 * @lcla_pool: lcla pool settings and data.
242 * @lcpa_base: The virtual mapped address of LCPA.
243 * @phy_lcpa: The physical address of the LCPA.
244 * @lcpa_size: The size of the LCPA area.
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000245 * @desc_slab: cache for descriptors.
Linus Walleij8d318a52010-03-30 15:33:42 +0200246 */
247struct d40_base {
248 spinlock_t interrupt_lock;
249 spinlock_t execmd_lock;
250 struct device *dev;
251 void __iomem *virtbase;
Linus Walleijf4185592010-06-22 18:06:42 -0700252 u8 rev:4;
Linus Walleij8d318a52010-03-30 15:33:42 +0200253 struct clk *clk;
254 phys_addr_t phy_start;
255 resource_size_t phy_size;
256 int irq;
257 int num_phy_chans;
258 int num_log_chans;
259 struct dma_device dma_both;
260 struct dma_device dma_slave;
261 struct dma_device dma_memcpy;
262 struct d40_chan *phy_chans;
263 struct d40_chan *log_chans;
264 struct d40_chan **lookup_log_chans;
265 struct d40_chan **lookup_phy_chans;
266 struct stedma40_platform_data *plat_data;
267 /* Physical half channels */
268 struct d40_phy_res *phy_res;
269 struct d40_lcla_pool lcla_pool;
270 void *lcpa_base;
271 dma_addr_t phy_lcpa;
272 resource_size_t lcpa_size;
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000273 struct kmem_cache *desc_slab;
Linus Walleij8d318a52010-03-30 15:33:42 +0200274};
275
276/**
277 * struct d40_interrupt_lookup - lookup table for interrupt handler
278 *
279 * @src: Interrupt mask register.
280 * @clr: Interrupt clear register.
281 * @is_error: true if this is an error interrupt.
 282 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
283 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
284 */
285struct d40_interrupt_lookup {
286 u32 src;
287 u32 clr;
288 bool is_error;
289 int offset;
290};
291
292/**
293 * struct d40_reg_val - simple lookup struct
294 *
295 * @reg: The register.
296 * @val: The value that belongs to the register in reg.
297 */
298struct d40_reg_val {
299 unsigned int reg;
300 unsigned int val;
301};
302
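/*
 * Allocate LLI storage for a descriptor: the common one-src/one-dst case
 * reuses the descriptor's built-in pre_alloc_lli area, while longer
 * transfers fall back to a kmalloc'd, alignment-padded buffer.
 */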
303static int d40_pool_lli_alloc(struct d40_desc *d40d,
304 int lli_len, bool is_log)
305{
306 u32 align;
307 void *base;
308
309 if (is_log)
310 align = sizeof(struct d40_log_lli);
311 else
312 align = sizeof(struct d40_phy_lli);
313
314 if (lli_len == 1) {
315 base = d40d->lli_pool.pre_alloc_lli;
316 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
317 d40d->lli_pool.base = NULL;
318 } else {
319 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
320
321 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
322 d40d->lli_pool.base = base;
323
324 if (d40d->lli_pool.base == NULL)
325 return -ENOMEM;
326 }
327
328 if (is_log) {
329 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
330 align);
331 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
332 align);
333 } else {
334 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
335 align);
336 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
337 align);
Linus Walleij8d318a52010-03-30 15:33:42 +0200338 }
339
340 return 0;
341}
342
343static void d40_pool_lli_free(struct d40_desc *d40d)
344{
345 kfree(d40d->lli_pool.base);
346 d40d->lli_pool.base = NULL;
347 d40d->lli_pool.size = 0;
348 d40d->lli_log.src = NULL;
349 d40d->lli_log.dst = NULL;
350 d40d->lli_phy.src = NULL;
351 d40d->lli_phy.dst = NULL;
Linus Walleij8d318a52010-03-30 15:33:42 +0200352}
353
Jonas Aaberg698e4732010-08-09 12:08:56 +0000354static int d40_lcla_alloc_one(struct d40_chan *d40c,
355 struct d40_desc *d40d)
356{
357 unsigned long flags;
358 int i;
359 int ret = -EINVAL;
360 int p;
361
362 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
363
364 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
365
366 /*
 367 * Allocate both src and dst at the same time; therefore only half the
 368 * entries are iterated, starting at 1 since 0 is used as the end marker.
369 */
370 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
371 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
372 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
373 d40d->lcla_alloc++;
374 ret = i;
375 break;
376 }
377 }
378
379 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
380
381 return ret;
382}
383
384static int d40_lcla_free_all(struct d40_chan *d40c,
385 struct d40_desc *d40d)
386{
387 unsigned long flags;
388 int i;
389 int ret = -EINVAL;
390
391 if (d40c->log_num == D40_PHY_CHAN)
392 return 0;
393
394 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
395
396 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
397 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
398 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
399 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
400 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
401 d40d->lcla_alloc--;
402 if (d40d->lcla_alloc == 0) {
403 ret = 0;
404 break;
405 }
406 }
407 }
408
409 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
410
411 return ret;
412
413}
414
Linus Walleij8d318a52010-03-30 15:33:42 +0200415static void d40_desc_remove(struct d40_desc *d40d)
416{
417 list_del(&d40d->node);
418}
419
420static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
421{
Linus Walleij8d318a52010-03-30 15:33:42 +0200422 struct d40_desc *d;
423 struct d40_desc *_d;
424
425 if (!list_empty(&d40c->client)) {
426 list_for_each_entry_safe(d, _d, &d40c->client, node)
427 if (async_tx_test_ack(&d->txd)) {
428 d40_pool_lli_free(d);
429 d40_desc_remove(d);
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000430 break;
Linus Walleij8d318a52010-03-30 15:33:42 +0200431 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200432 } else {
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000433 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
434 if (d != NULL) {
435 memset(d, 0, sizeof(struct d40_desc));
436 INIT_LIST_HEAD(&d->node);
437 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200438 }
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000439 return d;
Linus Walleij8d318a52010-03-30 15:33:42 +0200440}
441
442static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
443{
Jonas Aaberg698e4732010-08-09 12:08:56 +0000444
445 d40_lcla_free_all(d40c, d40d);
Jonas Aabergc675b1b2010-06-20 21:25:08 +0000446 kmem_cache_free(d40c->base->desc_slab, d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +0200447}
448
449static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
450{
451 list_add_tail(&desc->node, &d40c->active);
452}
453
Jonas Aaberg698e4732010-08-09 12:08:56 +0000454static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
455{
456 int curr_lcla = -EINVAL, next_lcla;
457
458 if (d40c->log_num == D40_PHY_CHAN) {
459 d40_phy_lli_write(d40c->base->virtbase,
460 d40c->phy_chan->num,
461 d40d->lli_phy.dst,
462 d40d->lli_phy.src);
463 d40d->lli_current = d40d->lli_len;
464 } else {
465
466 if ((d40d->lli_len - d40d->lli_current) > 1)
467 curr_lcla = d40_lcla_alloc_one(d40c, d40d);
468
469 d40_log_lli_lcpa_write(d40c->lcpa,
470 &d40d->lli_log.dst[d40d->lli_current],
471 &d40d->lli_log.src[d40d->lli_current],
472 curr_lcla);
473
474 d40d->lli_current++;
475 for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
476 struct d40_log_lli *lcla;
477
478 if (d40d->lli_current + 1 < d40d->lli_len)
479 next_lcla = d40_lcla_alloc_one(d40c, d40d);
480 else
481 next_lcla = -EINVAL;
482
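			/*
			 * Each physical channel is assumed to own a 1 KiB LCLA
			 * slice; every link uses a src/dst pair of 8-byte LLI
			 * entries, which gives the 8 * 2 stride below.
			 */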
483 lcla = d40c->base->lcla_pool.base +
484 d40c->phy_chan->num * 1024 +
485 8 * curr_lcla * 2;
486
487 d40_log_lli_lcla_write(lcla,
488 &d40d->lli_log.dst[d40d->lli_current],
489 &d40d->lli_log.src[d40d->lli_current],
490 next_lcla);
491
492 (void) dma_map_single(d40c->base->dev, lcla,
493 2 * sizeof(struct d40_log_lli),
494 DMA_TO_DEVICE);
495
496 curr_lcla = next_lcla;
497
498 if (curr_lcla == -EINVAL) {
499 d40d->lli_current++;
500 break;
501 }
502
503 }
504 }
505}
506
Linus Walleij8d318a52010-03-30 15:33:42 +0200507static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
508{
509 struct d40_desc *d;
510
511 if (list_empty(&d40c->active))
512 return NULL;
513
514 d = list_first_entry(&d40c->active,
515 struct d40_desc,
516 node);
517 return d;
518}
519
520static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
521{
522 list_add_tail(&desc->node, &d40c->queue);
523}
524
525static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
526{
527 struct d40_desc *d;
528
529 if (list_empty(&d40c->queue))
530 return NULL;
531
532 d = list_first_entry(&d40c->queue,
533 struct d40_desc,
534 node);
535 return d;
536}
537
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000538static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
539{
540 struct d40_desc *d;
541
542 if (list_empty(&d40c->queue))
543 return NULL;
544 list_for_each_entry(d, &d40c->queue, node)
545 if (list_is_last(&d->node, &d40c->queue))
546 break;
547 return d;
548}
549
Linus Walleij8d318a52010-03-30 15:33:42 +0200550/* Support functions for logical channels */
551
Linus Walleij8d318a52010-03-30 15:33:42 +0200552
553static int d40_channel_execute_command(struct d40_chan *d40c,
554 enum d40_command command)
555{
Jonas Aaberg767a9672010-08-09 12:08:34 +0000556 u32 status;
557 int i;
Linus Walleij8d318a52010-03-30 15:33:42 +0200558 void __iomem *active_reg;
559 int ret = 0;
560 unsigned long flags;
Jonas Aaberg1d392a72010-06-20 21:26:01 +0000561 u32 wmask;
Linus Walleij8d318a52010-03-30 15:33:42 +0200562
563 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
564
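	/*
	 * Even-numbered physical channels are controlled through the ACTIVE
	 * register, odd-numbered ones through ACTIVO.
	 */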
565 if (d40c->phy_chan->num % 2 == 0)
566 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
567 else
568 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
569
570 if (command == D40_DMA_SUSPEND_REQ) {
571 status = (readl(active_reg) &
572 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
573 D40_CHAN_POS(d40c->phy_chan->num);
574
575 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
576 goto done;
577 }
578
Jonas Aaberg1d392a72010-06-20 21:26:01 +0000579 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
580 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
581 active_reg);
Linus Walleij8d318a52010-03-30 15:33:42 +0200582
583 if (command == D40_DMA_SUSPEND_REQ) {
584
585 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
586 status = (readl(active_reg) &
587 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
588 D40_CHAN_POS(d40c->phy_chan->num);
589
590 cpu_relax();
591 /*
592 * Reduce the number of bus accesses while
593 * waiting for the DMA to suspend.
594 */
595 udelay(3);
596
597 if (status == D40_DMA_STOP ||
598 status == D40_DMA_SUSPENDED)
599 break;
600 }
601
602 if (i == D40_SUSPEND_MAX_IT) {
603 dev_err(&d40c->chan.dev->device,
604 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
605 __func__, d40c->phy_chan->num, d40c->log_num,
606 status);
607 dump_stack();
608 ret = -EBUSY;
609 }
610
611 }
612done:
613 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
614 return ret;
615}
616
617static void d40_term_all(struct d40_chan *d40c)
618{
619 struct d40_desc *d40d;
Linus Walleij8d318a52010-03-30 15:33:42 +0200620
621 /* Release active descriptors */
622 while ((d40d = d40_first_active_get(d40c))) {
623 d40_desc_remove(d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +0200624 d40_desc_free(d40c, d40d);
625 }
626
627 /* Release queued descriptors waiting for transfer */
628 while ((d40d = d40_first_queued(d40c))) {
629 d40_desc_remove(d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +0200630 d40_desc_free(d40c, d40d);
631 }
632
Linus Walleij8d318a52010-03-30 15:33:42 +0200633
634 d40c->pending_tx = 0;
635 d40c->busy = false;
636}
637
638static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
639{
640 u32 val;
641 unsigned long flags;
642
Jonas Aaberg0c322692010-06-20 21:25:46 +0000643 /* Notice that disabling requires the physical channel to be stopped */
Linus Walleij8d318a52010-03-30 15:33:42 +0200644 if (do_enable)
645 val = D40_ACTIVATE_EVENTLINE;
646 else
647 val = D40_DEACTIVATE_EVENTLINE;
648
649 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
650
651 /* Enable event line connected to device (or memcpy) */
652 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
653 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
654 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
655
656 writel((val << D40_EVENTLINE_POS(event)) |
657 ~D40_EVENTLINE_MASK(event),
658 d40c->base->virtbase + D40_DREG_PCBASE +
659 d40c->phy_chan->num * D40_DREG_PCDELTA +
660 D40_CHAN_REG_SSLNK);
661 }
662 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
663 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
664
665 writel((val << D40_EVENTLINE_POS(event)) |
666 ~D40_EVENTLINE_MASK(event),
667 d40c->base->virtbase + D40_DREG_PCBASE +
668 d40c->phy_chan->num * D40_DREG_PCDELTA +
669 D40_CHAN_REG_SDLNK);
670 }
671
672 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
673}
674
Jonas Aaberga5ebca42010-05-18 00:41:09 +0200675static u32 d40_chan_has_events(struct d40_chan *d40c)
Linus Walleij8d318a52010-03-30 15:33:42 +0200676{
Jonas Aabergbe8cb7d2010-08-09 12:07:44 +0000677 u32 val;
Linus Walleij8d318a52010-03-30 15:33:42 +0200678
Jonas Aabergbe8cb7d2010-08-09 12:07:44 +0000679 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
680 d40c->phy_chan->num * D40_DREG_PCDELTA +
681 D40_CHAN_REG_SSLNK);
Linus Walleij8d318a52010-03-30 15:33:42 +0200682
Jonas Aabergbe8cb7d2010-08-09 12:07:44 +0000683 val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
684 d40c->phy_chan->num * D40_DREG_PCDELTA +
685 D40_CHAN_REG_SDLNK);
Jonas Aaberga5ebca42010-05-18 00:41:09 +0200686 return val;
Linus Walleij8d318a52010-03-30 15:33:42 +0200687}
688
Jonas Aabergb55912c2010-08-09 12:08:02 +0000689static void d40_config_write(struct d40_chan *d40c)
Linus Walleij8d318a52010-03-30 15:33:42 +0200690{
691 u32 addr_base;
692 u32 var;
Linus Walleij8d318a52010-03-30 15:33:42 +0200693
694 /* Odd addresses are even addresses + 4 */
695 addr_base = (d40c->phy_chan->num % 2) * 4;
696 /* Setup channel mode to logical or physical */
697 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
698 D40_CHAN_POS(d40c->phy_chan->num);
699 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
700
701 /* Setup operational mode option register */
702 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
703 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
704
705 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
706
707 if (d40c->log_num != D40_PHY_CHAN) {
708 /* Set default config for CFG reg */
709 writel(d40c->src_def_cfg,
710 d40c->base->virtbase + D40_DREG_PCBASE +
711 d40c->phy_chan->num * D40_DREG_PCDELTA +
712 D40_CHAN_REG_SSCFG);
713 writel(d40c->dst_def_cfg,
714 d40c->base->virtbase + D40_DREG_PCBASE +
715 d40c->phy_chan->num * D40_DREG_PCDELTA +
716 D40_CHAN_REG_SDCFG);
717
Jonas Aabergb55912c2010-08-09 12:08:02 +0000718 /* Set LIDX for lcla */
719 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
720 D40_SREG_ELEM_LOG_LIDX_MASK,
721 d40c->base->virtbase + D40_DREG_PCBASE +
722 d40c->phy_chan->num * D40_DREG_PCDELTA +
723 D40_CHAN_REG_SDELT);
724
725 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
726 D40_SREG_ELEM_LOG_LIDX_MASK,
727 d40c->base->virtbase + D40_DREG_PCBASE +
728 d40c->phy_chan->num * D40_DREG_PCDELTA +
729 D40_CHAN_REG_SSELT);
730
Linus Walleij8d318a52010-03-30 15:33:42 +0200731 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200732}
733
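/*
 * Remaining bytes = number of elements still to transfer times the element
 * size; data_width is assumed to hold log2 of the element size in bytes.
 */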
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000734static u32 d40_residue(struct d40_chan *d40c)
735{
736 u32 num_elt;
737
738 if (d40c->log_num != D40_PHY_CHAN)
739 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
740 >> D40_MEM_LCSP2_ECNT_POS;
741 else
742 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
743 d40c->phy_chan->num * D40_DREG_PCDELTA +
744 D40_CHAN_REG_SDELT) &
745 D40_SREG_ELEM_PHY_ECNT_MASK) >>
746 D40_SREG_ELEM_PHY_ECNT_POS;
747 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
748}
749
750static bool d40_tx_is_linked(struct d40_chan *d40c)
751{
752 bool is_link;
753
754 if (d40c->log_num != D40_PHY_CHAN)
755 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
756 else
757 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
758 d40c->phy_chan->num * D40_DREG_PCDELTA +
759 D40_CHAN_REG_SDLNK) &
760 D40_SREG_LNK_PHYS_LNK_MASK;
761 return is_link;
762}
763
764static int d40_pause(struct dma_chan *chan)
765{
766 struct d40_chan *d40c =
767 container_of(chan, struct d40_chan, chan);
768 int res = 0;
769 unsigned long flags;
770
Jonas Aaberg3ac012a2010-08-09 12:09:12 +0000771 if (!d40c->busy)
772 return 0;
773
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000774 spin_lock_irqsave(&d40c->lock, flags);
775
776 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
777 if (res == 0) {
778 if (d40c->log_num != D40_PHY_CHAN) {
779 d40_config_set_event(d40c, false);
780 /* Resume the other logical channels if any */
781 if (d40_chan_has_events(d40c))
782 res = d40_channel_execute_command(d40c,
783 D40_DMA_RUN);
784 }
785 }
786
787 spin_unlock_irqrestore(&d40c->lock, flags);
788 return res;
789}
790
791static int d40_resume(struct dma_chan *chan)
792{
793 struct d40_chan *d40c =
794 container_of(chan, struct d40_chan, chan);
795 int res = 0;
796 unsigned long flags;
797
Jonas Aaberg3ac012a2010-08-09 12:09:12 +0000798 if (!d40c->busy)
799 return 0;
800
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000801 spin_lock_irqsave(&d40c->lock, flags);
802
803 if (d40c->base->rev == 0)
804 if (d40c->log_num != D40_PHY_CHAN) {
805 res = d40_channel_execute_command(d40c,
806 D40_DMA_SUSPEND_REQ);
807 goto no_suspend;
808 }
809
 810 /* If there are bytes left to transfer or a linked tx, resume the job */
811 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
812
813 if (d40c->log_num != D40_PHY_CHAN)
814 d40_config_set_event(d40c, true);
815
816 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
817 }
818
819no_suspend:
820 spin_unlock_irqrestore(&d40c->lock, flags);
821 return res;
822}
823
824static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
825{
826 /* TODO: Write */
827}
828
829static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
830{
831 struct d40_desc *d40d_prev = NULL;
832 int i;
833 u32 val;
834
835 if (!list_empty(&d40c->queue))
836 d40d_prev = d40_last_queued(d40c);
837 else if (!list_empty(&d40c->active))
838 d40d_prev = d40_first_active_get(d40c);
839
840 if (!d40d_prev)
841 return;
842
843 /* Here we try to join this job with previous jobs */
844 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
845 d40c->phy_chan->num * D40_DREG_PCDELTA +
846 D40_CHAN_REG_SSLNK);
847
848 /* Figure out which link we're currently transmitting */
849 for (i = 0; i < d40d_prev->lli_len; i++)
850 if (val == d40d_prev->lli_phy.src[i].reg_lnk)
851 break;
852
853 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
854 d40c->phy_chan->num * D40_DREG_PCDELTA +
855 D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
856
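	/*
	 * If the previous job is on its last link and still has elements left,
	 * point the link registers straight at the new job so the hardware
	 * continues into it without software intervention.
	 */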
857 if (i == (d40d_prev->lli_len - 1) && val > 0) {
858 /* Change the current one */
859 writel(virt_to_phys(d40d->lli_phy.src),
860 d40c->base->virtbase + D40_DREG_PCBASE +
861 d40c->phy_chan->num * D40_DREG_PCDELTA +
862 D40_CHAN_REG_SSLNK);
863 writel(virt_to_phys(d40d->lli_phy.dst),
864 d40c->base->virtbase + D40_DREG_PCBASE +
865 d40c->phy_chan->num * D40_DREG_PCDELTA +
866 D40_CHAN_REG_SDLNK);
867
868 d40d->is_hw_linked = true;
869
870 } else if (i < d40d_prev->lli_len) {
871 (void) dma_unmap_single(d40c->base->dev,
872 virt_to_phys(d40d_prev->lli_phy.src),
873 d40d_prev->lli_pool.size,
874 DMA_TO_DEVICE);
875
876 /* Keep the settings */
877 val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
878 ~D40_SREG_LNK_PHYS_LNK_MASK;
879 d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
880 val | virt_to_phys(d40d->lli_phy.src);
881
882 val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
883 ~D40_SREG_LNK_PHYS_LNK_MASK;
884 d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
885 val | virt_to_phys(d40d->lli_phy.dst);
886
887 (void) dma_map_single(d40c->base->dev,
888 d40d_prev->lli_phy.src,
889 d40d_prev->lli_pool.size,
890 DMA_TO_DEVICE);
891 d40d->is_hw_linked = true;
892 }
893}
894
Linus Walleij8d318a52010-03-30 15:33:42 +0200895static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
896{
897 struct d40_chan *d40c = container_of(tx->chan,
898 struct d40_chan,
899 chan);
900 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
901 unsigned long flags;
902
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000903 (void) d40_pause(&d40c->chan);
904
Linus Walleij8d318a52010-03-30 15:33:42 +0200905 spin_lock_irqsave(&d40c->lock, flags);
906
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000907 d40c->chan.cookie++;
908
909 if (d40c->chan.cookie < 0)
910 d40c->chan.cookie = 1;
911
912 d40d->txd.cookie = d40c->chan.cookie;
913
914 if (d40c->log_num == D40_PHY_CHAN)
915 d40_tx_submit_phy(d40c, d40d);
916 else
917 d40_tx_submit_log(d40c, d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +0200918
919 d40_desc_queue(d40c, d40d);
920
921 spin_unlock_irqrestore(&d40c->lock, flags);
922
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000923 (void) d40_resume(&d40c->chan);
924
Linus Walleij8d318a52010-03-30 15:33:42 +0200925 return tx->cookie;
926}
927
928static int d40_start(struct d40_chan *d40c)
929{
Linus Walleijf4185592010-06-22 18:06:42 -0700930 if (d40c->base->rev == 0) {
931 int err;
932
933 if (d40c->log_num != D40_PHY_CHAN) {
934 err = d40_channel_execute_command(d40c,
935 D40_DMA_SUSPEND_REQ);
936 if (err)
937 return err;
938 }
939 }
940
Jonas Aaberg0c322692010-06-20 21:25:46 +0000941 if (d40c->log_num != D40_PHY_CHAN)
Linus Walleij8d318a52010-03-30 15:33:42 +0200942 d40_config_set_event(d40c, true);
Linus Walleij8d318a52010-03-30 15:33:42 +0200943
Jonas Aaberg0c322692010-06-20 21:25:46 +0000944 return d40_channel_execute_command(d40c, D40_DMA_RUN);
Linus Walleij8d318a52010-03-30 15:33:42 +0200945}
946
947static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
948{
949 struct d40_desc *d40d;
950 int err;
951
952 /* Start queued jobs, if any */
953 d40d = d40_first_queued(d40c);
954
955 if (d40d != NULL) {
956 d40c->busy = true;
957
958 /* Remove from queue */
959 d40_desc_remove(d40d);
960
961 /* Add to active queue */
962 d40_desc_submit(d40c, d40d);
963
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000964 /*
965 * If this job is already linked in hw,
966 * do not submit it.
967 */
Jonas Aaberg698e4732010-08-09 12:08:56 +0000968
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000969 if (!d40d->is_hw_linked) {
970 /* Initiate DMA job */
971 d40_desc_load(d40c, d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +0200972
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000973 /* Start dma job */
974 err = d40_start(d40c);
Linus Walleij8d318a52010-03-30 15:33:42 +0200975
Jonas Aabergaa182ae2010-08-09 12:08:26 +0000976 if (err)
977 return NULL;
978 }
Linus Walleij8d318a52010-03-30 15:33:42 +0200979 }
980
981 return d40d;
982}
983
984/* called from interrupt context */
985static void dma_tc_handle(struct d40_chan *d40c)
986{
987 struct d40_desc *d40d;
988
Linus Walleij8d318a52010-03-30 15:33:42 +0200989 /* Get first active entry from list */
990 d40d = d40_first_active_get(d40c);
991
992 if (d40d == NULL)
993 return;
994
Jonas Aaberg698e4732010-08-09 12:08:56 +0000995 d40_lcla_free_all(d40c, d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +0200996
Jonas Aaberg698e4732010-08-09 12:08:56 +0000997 if (d40d->lli_current < d40d->lli_len) {
Linus Walleij8d318a52010-03-30 15:33:42 +0200998 d40_desc_load(d40c, d40d);
999 /* Start dma job */
1000 (void) d40_start(d40c);
1001 return;
1002 }
1003
1004 if (d40_queue_start(d40c) == NULL)
1005 d40c->busy = false;
1006
1007 d40c->pending_tx++;
1008 tasklet_schedule(&d40c->tasklet);
1009
1010}
1011
1012static void dma_tasklet(unsigned long data)
1013{
1014 struct d40_chan *d40c = (struct d40_chan *) data;
Jonas Aaberg767a9672010-08-09 12:08:34 +00001015 struct d40_desc *d40d;
Linus Walleij8d318a52010-03-30 15:33:42 +02001016 unsigned long flags;
1017 dma_async_tx_callback callback;
1018 void *callback_param;
1019
1020 spin_lock_irqsave(&d40c->lock, flags);
1021
1022 /* Get first active entry from list */
Jonas Aaberg767a9672010-08-09 12:08:34 +00001023 d40d = d40_first_active_get(d40c);
Linus Walleij8d318a52010-03-30 15:33:42 +02001024
Jonas Aaberg767a9672010-08-09 12:08:34 +00001025 if (d40d == NULL)
Linus Walleij8d318a52010-03-30 15:33:42 +02001026 goto err;
1027
Jonas Aaberg767a9672010-08-09 12:08:34 +00001028 d40c->completed = d40d->txd.cookie;
Linus Walleij8d318a52010-03-30 15:33:42 +02001029
1030 /*
 1031 * If a channel is being terminated, pending_tx is set to zero.
 1032 * This prevents any finished active jobs from returning to the client.
1033 */
1034 if (d40c->pending_tx == 0) {
1035 spin_unlock_irqrestore(&d40c->lock, flags);
1036 return;
1037 }
1038
1039 /* Callback to client */
Jonas Aaberg767a9672010-08-09 12:08:34 +00001040 callback = d40d->txd.callback;
1041 callback_param = d40d->txd.callback_param;
Linus Walleij8d318a52010-03-30 15:33:42 +02001042
Jonas Aaberg767a9672010-08-09 12:08:34 +00001043 if (async_tx_test_ack(&d40d->txd)) {
1044 d40_pool_lli_free(d40d);
1045 d40_desc_remove(d40d);
1046 d40_desc_free(d40c, d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +02001047 } else {
Jonas Aaberg767a9672010-08-09 12:08:34 +00001048 if (!d40d->is_in_client_list) {
1049 d40_desc_remove(d40d);
Jonas Aaberg698e4732010-08-09 12:08:56 +00001050 d40_lcla_free_all(d40c, d40d);
Jonas Aaberg767a9672010-08-09 12:08:34 +00001051 list_add_tail(&d40d->node, &d40c->client);
1052 d40d->is_in_client_list = true;
Linus Walleij8d318a52010-03-30 15:33:42 +02001053 }
1054 }
1055
1056 d40c->pending_tx--;
1057
1058 if (d40c->pending_tx)
1059 tasklet_schedule(&d40c->tasklet);
1060
1061 spin_unlock_irqrestore(&d40c->lock, flags);
1062
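	/*
	 * The client callback is invoked outside the channel lock, presumably
	 * so that it may safely submit new work against this channel.
	 */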
Jonas Aaberg767a9672010-08-09 12:08:34 +00001063 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
Linus Walleij8d318a52010-03-30 15:33:42 +02001064 callback(callback_param);
1065
1066 return;
1067
1068 err:
 1069 /* Rescue manoeuvre if receiving double interrupts */
1070 if (d40c->pending_tx > 0)
1071 d40c->pending_tx--;
1072 spin_unlock_irqrestore(&d40c->lock, flags);
1073}
1074
1075static irqreturn_t d40_handle_interrupt(int irq, void *data)
1076{
1077 static const struct d40_interrupt_lookup il[] = {
1078 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
1079 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
1080 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
1081 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
1082 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
1083 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
1084 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
1085 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
1086 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
1087 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
1088 };
1089
1090 int i;
1091 u32 regs[ARRAY_SIZE(il)];
Linus Walleij8d318a52010-03-30 15:33:42 +02001092 u32 idx;
1093 u32 row;
1094 long chan = -1;
1095 struct d40_chan *d40c;
1096 unsigned long flags;
1097 struct d40_base *base = data;
1098
1099 spin_lock_irqsave(&base->interrupt_lock, flags);
1100
1101 /* Read interrupt status of both logical and physical channels */
1102 for (i = 0; i < ARRAY_SIZE(il); i++)
1103 regs[i] = readl(base->virtbase + il[i].src);
1104
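	/*
	 * Scan the collected status words as one contiguous bitmap; each set
	 * bit is mapped back to its source register (row) and to the channel
	 * index within that register (idx).
	 */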
1105 for (;;) {
1106
1107 chan = find_next_bit((unsigned long *)regs,
1108 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
1109
1110 /* No more set bits found? */
1111 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
1112 break;
1113
1114 row = chan / BITS_PER_LONG;
1115 idx = chan & (BITS_PER_LONG - 1);
1116
1117 /* ACK interrupt */
Jonas Aaberg1b003482010-08-09 12:07:54 +00001118 writel(1 << idx, base->virtbase + il[row].clr);
Linus Walleij8d318a52010-03-30 15:33:42 +02001119
1120 if (il[row].offset == D40_PHY_CHAN)
1121 d40c = base->lookup_phy_chans[idx];
1122 else
1123 d40c = base->lookup_log_chans[il[row].offset + idx];
1124 spin_lock(&d40c->lock);
1125
1126 if (!il[row].is_error)
1127 dma_tc_handle(d40c);
1128 else
Linus Walleij508849a2010-06-20 21:26:07 +00001129 dev_err(base->dev,
1130 "[%s] IRQ chan: %ld offset %d idx %d\n",
Linus Walleij8d318a52010-03-30 15:33:42 +02001131 __func__, chan, il[row].offset, idx);
1132
1133 spin_unlock(&d40c->lock);
1134 }
1135
1136 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1137
1138 return IRQ_HANDLED;
1139}
1140
Linus Walleij8d318a52010-03-30 15:33:42 +02001141static int d40_validate_conf(struct d40_chan *d40c,
1142 struct stedma40_chan_cfg *conf)
1143{
1144 int res = 0;
1145 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1146 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1147 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1148 == STEDMA40_CHANNEL_IN_LOG_MODE;
1149
Linus Walleij0747c7ba2010-08-09 12:07:36 +00001150 if (!conf->dir) {
1151 dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
1152 __func__);
1153 res = -EINVAL;
1154 }
1155
1156 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1157 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1158 d40c->runtime_addr == 0) {
1159
1160 dev_err(&d40c->chan.dev->device,
1161 "[%s] Invalid TX channel address (%d)\n",
1162 __func__, conf->dst_dev_type);
1163 res = -EINVAL;
1164 }
1165
1166 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1167 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1168 d40c->runtime_addr == 0) {
1169 dev_err(&d40c->chan.dev->device,
1170 "[%s] Invalid RX channel address (%d)\n",
1171 __func__, conf->src_dev_type);
1172 res = -EINVAL;
1173 }
1174
1175 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
Linus Walleij8d318a52010-03-30 15:33:42 +02001176 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1177 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
1178 __func__);
1179 res = -EINVAL;
1180 }
1181
Linus Walleij0747c7ba2010-08-09 12:07:36 +00001182 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
Linus Walleij8d318a52010-03-30 15:33:42 +02001183 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1184 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
1185 __func__);
1186 res = -EINVAL;
1187 }
1188
1189 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1190 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1191 dev_err(&d40c->chan.dev->device,
1192 "[%s] No event line\n", __func__);
1193 res = -EINVAL;
1194 }
1195
1196 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1197 (src_event_group != dst_event_group)) {
1198 dev_err(&d40c->chan.dev->device,
1199 "[%s] Invalid event group\n", __func__);
1200 res = -EINVAL;
1201 }
1202
1203 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1204 /*
1205 * DMAC HW supports it. Will be added to this driver,
1206 * in case any dma client requires it.
1207 */
1208 dev_err(&d40c->chan.dev->device,
1209 "[%s] periph to periph not supported\n",
1210 __func__);
1211 res = -EINVAL;
1212 }
1213
1214 return res;
1215}
1216
1217static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001218 int log_event_line, bool is_log)
Linus Walleij8d318a52010-03-30 15:33:42 +02001219{
1220 unsigned long flags;
1221 spin_lock_irqsave(&phy->lock, flags);
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001222 if (!is_log) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001223 /* Physical interrupts are masked per physical full channel */
1224 if (phy->allocated_src == D40_ALLOC_FREE &&
1225 phy->allocated_dst == D40_ALLOC_FREE) {
1226 phy->allocated_dst = D40_ALLOC_PHY;
1227 phy->allocated_src = D40_ALLOC_PHY;
1228 goto found;
1229 } else
1230 goto not_found;
1231 }
1232
1233 /* Logical channel */
1234 if (is_src) {
1235 if (phy->allocated_src == D40_ALLOC_PHY)
1236 goto not_found;
1237
1238 if (phy->allocated_src == D40_ALLOC_FREE)
1239 phy->allocated_src = D40_ALLOC_LOG_FREE;
1240
1241 if (!(phy->allocated_src & (1 << log_event_line))) {
1242 phy->allocated_src |= 1 << log_event_line;
1243 goto found;
1244 } else
1245 goto not_found;
1246 } else {
1247 if (phy->allocated_dst == D40_ALLOC_PHY)
1248 goto not_found;
1249
1250 if (phy->allocated_dst == D40_ALLOC_FREE)
1251 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1252
1253 if (!(phy->allocated_dst & (1 << log_event_line))) {
1254 phy->allocated_dst |= 1 << log_event_line;
1255 goto found;
1256 } else
1257 goto not_found;
1258 }
1259
1260not_found:
1261 spin_unlock_irqrestore(&phy->lock, flags);
1262 return false;
1263found:
1264 spin_unlock_irqrestore(&phy->lock, flags);
1265 return true;
1266}
1267
1268static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1269 int log_event_line)
1270{
1271 unsigned long flags;
1272 bool is_free = false;
1273
1274 spin_lock_irqsave(&phy->lock, flags);
1275 if (!log_event_line) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001276 phy->allocated_dst = D40_ALLOC_FREE;
1277 phy->allocated_src = D40_ALLOC_FREE;
1278 is_free = true;
1279 goto out;
1280 }
1281
1282 /* Logical channel */
1283 if (is_src) {
1284 phy->allocated_src &= ~(1 << log_event_line);
1285 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1286 phy->allocated_src = D40_ALLOC_FREE;
1287 } else {
1288 phy->allocated_dst &= ~(1 << log_event_line);
1289 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1290 phy->allocated_dst = D40_ALLOC_FREE;
1291 }
1292
1293 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1294 D40_ALLOC_FREE);
1295
1296out:
1297 spin_unlock_irqrestore(&phy->lock, flags);
1298
1299 return is_free;
1300}
1301
1302static int d40_allocate_channel(struct d40_chan *d40c)
1303{
1304 int dev_type;
1305 int event_group;
1306 int event_line;
1307 struct d40_phy_res *phys;
1308 int i;
1309 int j;
1310 int log_num;
1311 bool is_src;
Linus Walleij508849a2010-06-20 21:26:07 +00001312 bool is_log = (d40c->dma_cfg.channel_type &
1313 STEDMA40_CHANNEL_IN_OPER_MODE)
Linus Walleij8d318a52010-03-30 15:33:42 +02001314 == STEDMA40_CHANNEL_IN_LOG_MODE;
1315
1316
1317 phys = d40c->base->phy_res;
1318
1319 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1320 dev_type = d40c->dma_cfg.src_dev_type;
1321 log_num = 2 * dev_type;
1322 is_src = true;
1323 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1324 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1325 /* dst event lines are used for logical memcpy */
1326 dev_type = d40c->dma_cfg.dst_dev_type;
1327 log_num = 2 * dev_type + 1;
1328 is_src = false;
1329 } else
1330 return -EINVAL;
1331
1332 event_group = D40_TYPE_TO_GROUP(dev_type);
1333 event_line = D40_TYPE_TO_EVENT(dev_type);
1334
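	/*
	 * The searches below assume that within every bank of eight physical
	 * channels, event group N is served by the pair 2*N and 2*N + 1.
	 */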
1335 if (!is_log) {
1336 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1337 /* Find physical half channel */
1338 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1339
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001340 if (d40_alloc_mask_set(&phys[i], is_src,
1341 0, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001342 goto found_phy;
1343 }
1344 } else
1345 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1346 int phy_num = j + event_group * 2;
1347 for (i = phy_num; i < phy_num + 2; i++) {
Linus Walleij508849a2010-06-20 21:26:07 +00001348 if (d40_alloc_mask_set(&phys[i],
1349 is_src,
1350 0,
1351 is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001352 goto found_phy;
1353 }
1354 }
1355 return -EINVAL;
1356found_phy:
1357 d40c->phy_chan = &phys[i];
1358 d40c->log_num = D40_PHY_CHAN;
1359 goto out;
1360 }
1361 if (dev_type == -1)
1362 return -EINVAL;
1363
1364 /* Find logical channel */
1365 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1366 int phy_num = j + event_group * 2;
1367 /*
 1368 * Spread logical channels across all available physical channels
 1369 * rather than packing every logical channel onto the first available
 1370 * phy channel.
1371 */
1372 if (is_src) {
1373 for (i = phy_num; i < phy_num + 2; i++) {
1374 if (d40_alloc_mask_set(&phys[i], is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001375 event_line, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001376 goto found_log;
1377 }
1378 } else {
1379 for (i = phy_num + 1; i >= phy_num; i--) {
1380 if (d40_alloc_mask_set(&phys[i], is_src,
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001381 event_line, is_log))
Linus Walleij8d318a52010-03-30 15:33:42 +02001382 goto found_log;
1383 }
1384 }
1385 }
1386 return -EINVAL;
1387
1388found_log:
1389 d40c->phy_chan = &phys[i];
1390 d40c->log_num = log_num;
1391out:
1392
1393 if (is_log)
1394 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1395 else
1396 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1397
1398 return 0;
1399
1400}
1401
Linus Walleij8d318a52010-03-30 15:33:42 +02001402static int d40_config_memcpy(struct d40_chan *d40c)
1403{
1404 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1405
1406 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1407 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1408 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1409 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1410 memcpy[d40c->chan.chan_id];
1411
1412 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1413 dma_has_cap(DMA_SLAVE, cap)) {
1414 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1415 } else {
1416 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1417 __func__);
1418 return -EINVAL;
1419 }
1420
1421 return 0;
1422}
1423
1424
1425static int d40_free_dma(struct d40_chan *d40c)
1426{
1427
1428 int res = 0;
Jonas Aabergd181b3a2010-06-20 21:26:38 +00001429 u32 event;
Linus Walleij8d318a52010-03-30 15:33:42 +02001430 struct d40_phy_res *phy = d40c->phy_chan;
1431 bool is_src;
Per Fridena8be8622010-06-20 21:24:59 +00001432 struct d40_desc *d;
1433 struct d40_desc *_d;
1434
Linus Walleij8d318a52010-03-30 15:33:42 +02001435
1436 /* Terminate all queued and active transfers */
1437 d40_term_all(d40c);
1438
Per Fridena8be8622010-06-20 21:24:59 +00001439 /* Release client owned descriptors */
1440 if (!list_empty(&d40c->client))
1441 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1442 d40_pool_lli_free(d);
1443 d40_desc_remove(d);
Per Fridena8be8622010-06-20 21:24:59 +00001444 d40_desc_free(d40c, d);
1445 }
1446
Linus Walleij8d318a52010-03-30 15:33:42 +02001447 if (phy == NULL) {
1448 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1449 __func__);
1450 return -EINVAL;
1451 }
1452
1453 if (phy->allocated_src == D40_ALLOC_FREE &&
1454 phy->allocated_dst == D40_ALLOC_FREE) {
1455 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1456 __func__);
1457 return -EINVAL;
1458 }
1459
Linus Walleij8d318a52010-03-30 15:33:42 +02001460 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1461 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1462 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
Linus Walleij8d318a52010-03-30 15:33:42 +02001463 is_src = false;
1464 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1465 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
Linus Walleij8d318a52010-03-30 15:33:42 +02001466 is_src = true;
1467 } else {
1468 dev_err(&d40c->chan.dev->device,
1469 "[%s] Unknown direction\n", __func__);
1470 return -EINVAL;
1471 }
1472
Jonas Aabergd181b3a2010-06-20 21:26:38 +00001473 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1474 if (res) {
1475 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
1476 __func__);
1477 return res;
1478 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001479
Jonas Aabergd181b3a2010-06-20 21:26:38 +00001480 if (d40c->log_num != D40_PHY_CHAN) {
1481 /* Release logical channel, deactivate the event line */
1482
1483 d40_config_set_event(d40c, false);
Linus Walleij8d318a52010-03-30 15:33:42 +02001484 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1485
1486 /*
 1487 * Check if there are more logical allocations
1488 * on this phy channel.
1489 */
1490 if (!d40_alloc_mask_free(phy, is_src, event)) {
1491 /* Resume the other logical channels if any */
1492 if (d40_chan_has_events(d40c)) {
1493 res = d40_channel_execute_command(d40c,
1494 D40_DMA_RUN);
1495 if (res) {
1496 dev_err(&d40c->chan.dev->device,
1497 "[%s] Executing RUN command\n",
1498 __func__);
1499 return res;
1500 }
1501 }
1502 return 0;
1503 }
Jonas Aabergd181b3a2010-06-20 21:26:38 +00001504 } else {
1505 (void) d40_alloc_mask_free(phy, is_src, 0);
1506 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001507
1508 /* Release physical channel */
1509 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1510 if (res) {
1511 dev_err(&d40c->chan.dev->device,
1512 "[%s] Failed to stop channel\n", __func__);
1513 return res;
1514 }
1515 d40c->phy_chan = NULL;
1516 /* Invalidate channel type */
1517 d40c->dma_cfg.channel_type = 0;
1518 d40c->base->lookup_phy_chans[phy->num] = NULL;
1519
1520 return 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02001521}
1522
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001523static bool d40_is_paused(struct d40_chan *d40c)
1524{
1525 bool is_paused = false;
1526 unsigned long flags;
1527 void __iomem *active_reg;
1528 u32 status;
1529 u32 event;
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001530
1531 spin_lock_irqsave(&d40c->lock, flags);
1532
1533 if (d40c->log_num == D40_PHY_CHAN) {
1534 if (d40c->phy_chan->num % 2 == 0)
1535 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1536 else
1537 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1538
1539 status = (readl(active_reg) &
1540 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1541 D40_CHAN_POS(d40c->phy_chan->num);
1542 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1543 is_paused = true;
1544
1545 goto _exit;
1546 }
1547
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001548 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00001549 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001550 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00001551 status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1552 d40c->phy_chan->num * D40_DREG_PCDELTA +
1553 D40_CHAN_REG_SDLNK);
1554 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001555 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00001556 status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1557 d40c->phy_chan->num * D40_DREG_PCDELTA +
1558 D40_CHAN_REG_SSLNK);
1559 } else {
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001560 dev_err(&d40c->chan.dev->device,
1561 "[%s] Unknown direction\n", __func__);
1562 goto _exit;
1563 }
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00001564
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001565 status = (status & D40_EVENTLINE_MASK(event)) >>
1566 D40_EVENTLINE_POS(event);
1567
1568 if (status != D40_DMA_RUN)
1569 is_paused = true;
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001570_exit:
1571 spin_unlock_irqrestore(&d40c->lock, flags);
1572 return is_paused;
1573
1574}
1575
1576
Linus Walleij8d318a52010-03-30 15:33:42 +02001577static u32 stedma40_residue(struct dma_chan *chan)
1578{
1579 struct d40_chan *d40c =
1580 container_of(chan, struct d40_chan, chan);
1581 u32 bytes_left;
1582 unsigned long flags;
1583
1584 spin_lock_irqsave(&d40c->lock, flags);
1585 bytes_left = d40_residue(d40c);
1586 spin_unlock_irqrestore(&d40c->lock, flags);
1587
1588 return bytes_left;
1589}
1590
1591/* Public DMA functions in addition to the DMA engine framework */
1592
1593int stedma40_set_psize(struct dma_chan *chan,
1594 int src_psize,
1595 int dst_psize)
1596{
1597 struct d40_chan *d40c =
1598 container_of(chan, struct d40_chan, chan);
1599 unsigned long flags;
1600
1601 spin_lock_irqsave(&d40c->lock, flags);
1602
1603 if (d40c->log_num != D40_PHY_CHAN) {
1604 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1605 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
Linus Walleij508849a2010-06-20 21:26:07 +00001606 d40c->log_def.lcsp1 |= src_psize <<
1607 D40_MEM_LCSP1_SCFG_PSIZE_POS;
1608 d40c->log_def.lcsp3 |= dst_psize <<
1609 D40_MEM_LCSP1_SCFG_PSIZE_POS;
Linus Walleij8d318a52010-03-30 15:33:42 +02001610 goto out;
1611 }
1612
1613 if (src_psize == STEDMA40_PSIZE_PHY_1)
1614 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1615 else {
1616 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1617 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1618 D40_SREG_CFG_PSIZE_POS);
1619 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1620 }
1621
1622 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1623 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1624 else {
1625 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1626 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1627 D40_SREG_CFG_PSIZE_POS);
1628 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1629 }
1630out:
1631 spin_unlock_irqrestore(&d40c->lock, flags);
1632 return 0;
1633}
1634EXPORT_SYMBOL(stedma40_set_psize);
1635
1636struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1637 struct scatterlist *sgl_dst,
1638 struct scatterlist *sgl_src,
1639 unsigned int sgl_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001640 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001641{
1642 int res;
1643 struct d40_desc *d40d;
1644 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1645 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001646 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001647
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001648 if (d40c->phy_chan == NULL) {
1649 dev_err(&d40c->chan.dev->device,
1650 "[%s] Unallocated channel.\n", __func__);
1651 return ERR_PTR(-EINVAL);
1652 }
1653
Jonas Aaberg2a614342010-06-20 21:25:24 +00001654 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001655 d40d = d40_desc_get(d40c);
1656
1657 if (d40d == NULL)
1658 goto err;
1659
Linus Walleij8d318a52010-03-30 15:33:42 +02001660 d40d->lli_len = sgl_len;
Jonas Aaberg698e4732010-08-09 12:08:56 +00001661 d40d->lli_current = 0;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001662 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001663
1664 if (d40c->log_num != D40_PHY_CHAN) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001665
1666 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1667 dev_err(&d40c->chan.dev->device,
1668 "[%s] Out of memory\n", __func__);
1669 goto err;
1670 }
1671
Jonas Aaberg698e4732010-08-09 12:08:56 +00001672 (void) d40_log_sg_to_lli(sgl_src,
Linus Walleij8d318a52010-03-30 15:33:42 +02001673 sgl_len,
1674 d40d->lli_log.src,
1675 d40c->log_def.lcsp1,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001676 d40c->dma_cfg.src_info.data_width);
Linus Walleij8d318a52010-03-30 15:33:42 +02001677
Jonas Aaberg698e4732010-08-09 12:08:56 +00001678 (void) d40_log_sg_to_lli(sgl_dst,
Linus Walleij8d318a52010-03-30 15:33:42 +02001679 sgl_len,
1680 d40d->lli_log.dst,
1681 d40c->log_def.lcsp3,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001682 d40c->dma_cfg.dst_info.data_width);
Linus Walleij8d318a52010-03-30 15:33:42 +02001683 } else {
1684 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1685 dev_err(&d40c->chan.dev->device,
1686 "[%s] Out of memory\n", __func__);
1687 goto err;
1688 }
1689
1690 res = d40_phy_sg_to_lli(sgl_src,
1691 sgl_len,
1692 0,
1693 d40d->lli_phy.src,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001694 virt_to_phys(d40d->lli_phy.src),
Linus Walleij8d318a52010-03-30 15:33:42 +02001695 d40c->src_def_cfg,
1696 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00001697 d40c->dma_cfg.src_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02001698
1699 if (res < 0)
1700 goto err;
1701
1702 res = d40_phy_sg_to_lli(sgl_dst,
1703 sgl_len,
1704 0,
1705 d40d->lli_phy.dst,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001706 virt_to_phys(d40d->lli_phy.dst),
Linus Walleij8d318a52010-03-30 15:33:42 +02001707 d40c->dst_def_cfg,
1708 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00001709 d40c->dma_cfg.dst_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02001710
1711 if (res < 0)
1712 goto err;
1713
1714 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1715 d40d->lli_pool.size, DMA_TO_DEVICE);
1716 }
1717
1718 dma_async_tx_descriptor_init(&d40d->txd, chan);
1719
1720 d40d->txd.tx_submit = d40_tx_submit;
1721
Jonas Aaberg2a614342010-06-20 21:25:24 +00001722 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001723
1724 return &d40d->txd;
1725err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001726 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001727 return NULL;
1728}
1729EXPORT_SYMBOL(stedma40_memcpy_sg);
1730
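/*
 * Channel filter for dma_request_channel(). With a stedma40_chan_cfg passed
 * as @data the configuration is validated and stored on the channel; with
 * NULL the channel is set up for memcpy.
 *
 * Rough usage sketch from a client driver (the cfg contents are only an
 * illustration, real values depend on the platform):
 *
 *	struct stedma40_chan_cfg cfg = { .dir = STEDMA40_PERIPH_TO_MEM };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */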
1731bool stedma40_filter(struct dma_chan *chan, void *data)
1732{
1733 struct stedma40_chan_cfg *info = data;
1734 struct d40_chan *d40c =
1735 container_of(chan, struct d40_chan, chan);
1736 int err;
1737
1738 if (data) {
1739 err = d40_validate_conf(d40c, info);
1740 if (!err)
1741 d40c->dma_cfg = *info;
1742 } else
1743 err = d40_config_memcpy(d40c);
1744
1745 return err == 0;
1746}
1747EXPORT_SYMBOL(stedma40_filter);
1748
1749/* DMA ENGINE functions */
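/*
 * Allocate a physical or logical channel for this dma_chan. Channels without
 * a configuration fall back to memcpy, and the hardware configuration is
 * only written when the underlying physical resource is first claimed.
 */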
1750static int d40_alloc_chan_resources(struct dma_chan *chan)
1751{
1752 int err;
1753 unsigned long flags;
1754 struct d40_chan *d40c =
1755 container_of(chan, struct d40_chan, chan);
Linus Walleijef1872e2010-06-20 21:24:52 +00001756 bool is_free_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02001757 spin_lock_irqsave(&d40c->lock, flags);
1758
1759 d40c->completed = chan->cookie = 1;
1760
1761 /*
1762 * If no dma configuration is set (channel_type == 0)
Linus Walleijef1872e2010-06-20 21:24:52 +00001763 * use default configuration (memcpy)
Linus Walleij8d318a52010-03-30 15:33:42 +02001764 */
1765 if (d40c->dma_cfg.channel_type == 0) {
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001766
Linus Walleij8d318a52010-03-30 15:33:42 +02001767 err = d40_config_memcpy(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001768 if (err) {
1769 dev_err(&d40c->chan.dev->device,
1770 "[%s] Failed to configure memcpy channel\n",
1771 __func__);
1772 goto fail;
1773 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001774 }
Linus Walleijef1872e2010-06-20 21:24:52 +00001775 is_free_phy = (d40c->phy_chan == NULL);
Linus Walleij8d318a52010-03-30 15:33:42 +02001776
1777 err = d40_allocate_channel(d40c);
1778 if (err) {
1779 dev_err(&d40c->chan.dev->device,
1780 "[%s] Failed to allocate channel\n", __func__);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001781 goto fail;
Linus Walleij8d318a52010-03-30 15:33:42 +02001782 }
1783
Linus Walleijef1872e2010-06-20 21:24:52 +00001784 /* Fill in basic CFG register values */
1785 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1786 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1787
1788 if (d40c->log_num != D40_PHY_CHAN) {
1789 d40_log_cfg(&d40c->dma_cfg,
1790 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1791
1792 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1793 d40c->lcpa = d40c->base->lcpa_base +
1794 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1795 else
1796 d40c->lcpa = d40c->base->lcpa_base +
1797 d40c->dma_cfg.dst_dev_type *
1798 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1799 }
1800
1801 /*
1802 * Only write channel configuration to the DMA if the physical
1803 * resource is free. In case of multiple logical channels
1804 * on the same physical resource, only the first write is necessary.
1805 */
Jonas Aabergb55912c2010-08-09 12:08:02 +00001806 if (is_free_phy)
1807 d40_config_write(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001808fail:
Linus Walleij8d318a52010-03-30 15:33:42 +02001809 spin_unlock_irqrestore(&d40c->lock, flags);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001810 return err;
Linus Walleij8d318a52010-03-30 15:33:42 +02001811}
1812
1813static void d40_free_chan_resources(struct dma_chan *chan)
1814{
1815 struct d40_chan *d40c =
1816 container_of(chan, struct d40_chan, chan);
1817 int err;
1818 unsigned long flags;
1819
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001820 if (d40c->phy_chan == NULL) {
1821 dev_err(&d40c->chan.dev->device,
1822 "[%s] Cannot free unallocated channel\n", __func__);
1823 return;
1824 }
1825
1826
Linus Walleij8d318a52010-03-30 15:33:42 +02001827 spin_lock_irqsave(&d40c->lock, flags);
1828
1829 err = d40_free_dma(d40c);
1830
1831 if (err)
1832 dev_err(&d40c->chan.dev->device,
1833 "[%s] Failed to free channel\n", __func__);
1834 spin_unlock_irqrestore(&d40c->lock, flags);
1835}
1836
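/*
 * Prepare a single-block memcpy job: one src/dst pair of logical LLIs for
 * logical channels, or one pair of physical LLIs (mapped for the device)
 * for physical channels.
 */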
1837static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1838 dma_addr_t dst,
1839 dma_addr_t src,
1840 size_t size,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001841 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001842{
1843 struct d40_desc *d40d;
1844 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1845 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001846 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001847 int err = 0;
1848
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001849 if (d40c->phy_chan == NULL) {
1850 dev_err(&d40c->chan.dev->device,
1851 "[%s] Channel is not allocated.\n", __func__);
1852 return ERR_PTR(-EINVAL);
1853 }
1854
Jonas Aaberg2a614342010-06-20 21:25:24 +00001855 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001856 d40d = d40_desc_get(d40c);
1857
1858 if (d40d == NULL) {
1859 dev_err(&d40c->chan.dev->device,
1860 "[%s] Descriptor is NULL\n", __func__);
1861 goto err;
1862 }
1863
Jonas Aaberg2a614342010-06-20 21:25:24 +00001864 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001865
1866 dma_async_tx_descriptor_init(&d40d->txd, chan);
1867
1868 d40d->txd.tx_submit = d40_tx_submit;
1869
1870 if (d40c->log_num != D40_PHY_CHAN) {
1871
1872 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1873 dev_err(&d40c->chan.dev->device,
1874 "[%s] Out of memory\n", __func__);
1875 goto err;
1876 }
1877 d40d->lli_len = 1;
Jonas Aaberg698e4732010-08-09 12:08:56 +00001878 d40d->lli_current = 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02001879
1880 d40_log_fill_lli(d40d->lli_log.src,
1881 src,
1882 size,
Linus Walleij8d318a52010-03-30 15:33:42 +02001883 d40c->log_def.lcsp1,
1884 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001885 true);
Linus Walleij8d318a52010-03-30 15:33:42 +02001886
1887 d40_log_fill_lli(d40d->lli_log.dst,
1888 dst,
1889 size,
Linus Walleij8d318a52010-03-30 15:33:42 +02001890 d40c->log_def.lcsp3,
1891 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001892 true);
Linus Walleij8d318a52010-03-30 15:33:42 +02001893
1894 } else {
1895
1896 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1897 dev_err(&d40c->chan.dev->device,
1898 "[%s] Out of memory\n", __func__);
1899 goto err;
1900 }
1901
1902 err = d40_phy_fill_lli(d40d->lli_phy.src,
1903 src,
1904 size,
1905 d40c->dma_cfg.src_info.psize,
1906 0,
1907 d40c->src_def_cfg,
1908 true,
1909 d40c->dma_cfg.src_info.data_width,
1910 false);
1911 if (err)
1912 goto err_fill_lli;
1913
1914 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1915 dst,
1916 size,
1917 d40c->dma_cfg.dst_info.psize,
1918 0,
1919 d40c->dst_def_cfg,
1920 true,
1921 d40c->dma_cfg.dst_info.data_width,
1922 false);
1923
1924 if (err)
1925 goto err_fill_lli;
1926
1927 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1928 d40d->lli_pool.size, DMA_TO_DEVICE);
1929 }
1930
Jonas Aaberg2a614342010-06-20 21:25:24 +00001931 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001932 return &d40d->txd;
1933
1934err_fill_lli:
1935 dev_err(&d40c->chan.dev->device,
1936 "[%s] Failed filling in PHY LLI\n", __func__);
1937 d40_pool_lli_free(d40d);
1938err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001939 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001940 return NULL;
1941}
1942
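/*
 * Fill in logical-channel LLIs for a slave scatter/gather job. The device
 * address is the runtime-configured one if set, otherwise it is taken from
 * the platform data rx/tx tables.
 */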
1943static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1944 struct d40_chan *d40c,
1945 struct scatterlist *sgl,
1946 unsigned int sg_len,
1947 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001948 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001949{
1950 dma_addr_t dev_addr = 0;
1951 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001952
1953 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1954 dev_err(&d40c->chan.dev->device,
1955 "[%s] Out of memory\n", __func__);
1956 return -ENOMEM;
1957 }
1958
1959 d40d->lli_len = sg_len;
Jonas Aaberg698e4732010-08-09 12:08:56 +00001960 d40d->lli_current = 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02001961
Jonas Aaberg2a614342010-06-20 21:25:24 +00001962 if (direction == DMA_FROM_DEVICE) {
Linus Walleij95e14002010-08-04 13:37:45 +02001963 if (d40c->runtime_addr)
 1964 dev_addr = d40c->runtime_addr;
 1965 else
 1966 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001967 } else if (direction == DMA_TO_DEVICE) {
Linus Walleij95e14002010-08-04 13:37:45 +02001968 if (d40c->runtime_addr)
 1969 dev_addr = d40c->runtime_addr;
 1970 else
 1971 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
 1972 } else {
Jonas Aaberg2a614342010-06-20 21:25:24 +00001973 return -EINVAL;
Linus Walleij8d318a52010-03-30 15:33:42 +02001974 }
Jonas Aaberg2a614342010-06-20 21:25:24 +00001975
Jonas Aaberg698e4732010-08-09 12:08:56 +00001976 total_size = d40_log_sg_to_dev(sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001977 &d40d->lli_log,
1978 &d40c->log_def,
1979 d40c->dma_cfg.src_info.data_width,
1980 d40c->dma_cfg.dst_info.data_width,
1981 direction,
Jonas Aaberg698e4732010-08-09 12:08:56 +00001982 dev_addr);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001983
Linus Walleij8d318a52010-03-30 15:33:42 +02001984 if (total_size < 0)
1985 return -EINVAL;
1986
1987 return 0;
1988}
1989
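/*
 * Fill in physical-channel LLIs for a slave scatter/gather job, using the
 * same runtime-address-first device address lookup as the logical variant.
 */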
1990static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1991 struct d40_chan *d40c,
1992 struct scatterlist *sgl,
1993 unsigned int sgl_len,
1994 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001995 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001996{
1997 dma_addr_t src_dev_addr;
1998 dma_addr_t dst_dev_addr;
1999 int res;
2000
2001 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
2002 dev_err(&d40c->chan.dev->device,
2003 "[%s] Out of memory\n", __func__);
2004 return -ENOMEM;
2005 }
2006
2007 d40d->lli_len = sgl_len;
Jonas Aaberg698e4732010-08-09 12:08:56 +00002008 d40d->lli_current = 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02002009
2010 if (direction == DMA_FROM_DEVICE) {
2011 dst_dev_addr = 0;
Linus Walleij95e14002010-08-04 13:37:45 +02002012 if (d40c->runtime_addr)
2013 src_dev_addr = d40c->runtime_addr;
2014 else
2015 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Linus Walleij8d318a52010-03-30 15:33:42 +02002016 } else if (direction == DMA_TO_DEVICE) {
Linus Walleij95e14002010-08-04 13:37:45 +02002017 if (d40c->runtime_addr)
2018 dst_dev_addr = d40c->runtime_addr;
2019 else
2020 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Linus Walleij8d318a52010-03-30 15:33:42 +02002021 src_dev_addr = 0;
2022 } else
2023 return -EINVAL;
2024
2025 res = d40_phy_sg_to_lli(sgl,
2026 sgl_len,
2027 src_dev_addr,
2028 d40d->lli_phy.src,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00002029 virt_to_phys(d40d->lli_phy.src),
Linus Walleij8d318a52010-03-30 15:33:42 +02002030 d40c->src_def_cfg,
2031 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00002032 d40c->dma_cfg.src_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02002033 if (res < 0)
2034 return res;
2035
2036 res = d40_phy_sg_to_lli(sgl,
2037 sgl_len,
2038 dst_dev_addr,
2039 d40d->lli_phy.dst,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00002040 virt_to_phys(d40d->lli_phy.dst),
Linus Walleij8d318a52010-03-30 15:33:42 +02002041 d40c->dst_def_cfg,
2042 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00002043 d40c->dma_cfg.dst_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02002044 if (res < 0)
2045 return res;
2046
2047 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2048 d40d->lli_pool.size, DMA_TO_DEVICE);
2049 return 0;
2050}
2051
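/*
 * dmaengine prep_slave_sg entry point: fetch a descriptor and hand it to
 * the logical or physical helper above depending on the channel type.
 */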
2052static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2053 struct scatterlist *sgl,
2054 unsigned int sg_len,
2055 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002056 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02002057{
2058 struct d40_desc *d40d;
2059 struct d40_chan *d40c = container_of(chan, struct d40_chan,
2060 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00002061 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002062 int err;
2063
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002064 if (d40c->phy_chan == NULL) {
2065 dev_err(&d40c->chan.dev->device,
2066 "[%s] Cannot prepare unallocated channel\n", __func__);
2067 return ERR_PTR(-EINVAL);
2068 }
2069
Linus Walleij8d318a52010-03-30 15:33:42 +02002070 if (d40c->dma_cfg.pre_transfer)
2071 d40c->dma_cfg.pre_transfer(chan,
2072 d40c->dma_cfg.pre_transfer_data,
2073 sg_dma_len(sgl));
2074
Jonas Aaberg2a614342010-06-20 21:25:24 +00002075 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002076 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00002077 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002078
2079 if (d40d == NULL)
2080 return NULL;
2081
Linus Walleij8d318a52010-03-30 15:33:42 +02002082 if (d40c->log_num != D40_PHY_CHAN)
2083 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002084 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002085 else
2086 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002087 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002088 if (err) {
2089 dev_err(&d40c->chan.dev->device,
2090 "[%s] Failed to prepare %s slave sg job: %d\n",
2091 __func__,
2092 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2093 return NULL;
2094 }
2095
Jonas Aaberg2a614342010-06-20 21:25:24 +00002096 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002097
2098 dma_async_tx_descriptor_init(&d40d->txd, chan);
2099
2100 d40d->txd.tx_submit = d40_tx_submit;
2101
2102 return &d40d->txd;
2103}
2104
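/*
 * dmaengine tx_status entry point: reports completion from the cookie
 * counters, and additionally DMA_PAUSED and the residue of the running job.
 */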
2105static enum dma_status d40_tx_status(struct dma_chan *chan,
2106 dma_cookie_t cookie,
2107 struct dma_tx_state *txstate)
2108{
2109 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2110 dma_cookie_t last_used;
2111 dma_cookie_t last_complete;
2112 int ret;
2113
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002114 if (d40c->phy_chan == NULL) {
2115 dev_err(&d40c->chan.dev->device,
2116 "[%s] Cannot read status of unallocated channel\n",
2117 __func__);
2118 return -EINVAL;
2119 }
2120
Linus Walleij8d318a52010-03-30 15:33:42 +02002121 last_complete = d40c->completed;
2122 last_used = chan->cookie;
2123
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002124 if (d40_is_paused(d40c))
2125 ret = DMA_PAUSED;
2126 else
2127 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002128
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002129 dma_set_tx_state(txstate, last_complete, last_used,
2130 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002131
2132 return ret;
2133}
2134
2135static void d40_issue_pending(struct dma_chan *chan)
2136{
2137 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2138 unsigned long flags;
2139
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002140 if (d40c->phy_chan == NULL) {
2141 dev_err(&d40c->chan.dev->device,
2142 "[%s] Channel is not allocated!\n", __func__);
2143 return;
2144 }
2145
Linus Walleij8d318a52010-03-30 15:33:42 +02002146 spin_lock_irqsave(&d40c->lock, flags);
2147
2148 /* Busy means that pending jobs are already being processed */
2149 if (!d40c->busy)
2150 (void) d40_queue_start(d40c);
2151
2152 spin_unlock_irqrestore(&d40c->lock, flags);
2153}
2154
Linus Walleij95e14002010-08-04 13:37:45 +02002155/* Runtime reconfiguration extension */
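/*
 * Translate a generic dma_slave_config into the stedma40 channel settings:
 * direction, device address, bus width and burst size. Reached through the
 * DMA_SLAVE_CONFIG control command; a rough client-side sketch follows
 * (dev_fifo_addr is only a placeholder):
 *
 *	struct dma_slave_config config = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = dev_fifo_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &config);
 */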
2156static void d40_set_runtime_config(struct dma_chan *chan,
2157 struct dma_slave_config *config)
2158{
2159 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2160 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2161 enum dma_slave_buswidth config_addr_width;
2162 dma_addr_t config_addr;
2163 u32 config_maxburst;
2164 enum stedma40_periph_data_width addr_width;
2165 int psize;
2166
2167 if (config->direction == DMA_FROM_DEVICE) {
2168 dma_addr_t dev_addr_rx =
2169 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2170
2171 config_addr = config->src_addr;
2172 if (dev_addr_rx)
2173 dev_dbg(d40c->base->dev,
2174 "channel has a pre-wired RX address %08x "
2175 "overriding with %08x\n",
2176 dev_addr_rx, config_addr);
2177 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2178 dev_dbg(d40c->base->dev,
2179 "channel was not configured for peripheral "
2180 "to memory transfer (%d) overriding\n",
2181 cfg->dir);
2182 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2183
2184 config_addr_width = config->src_addr_width;
2185 config_maxburst = config->src_maxburst;
2186
2187 } else if (config->direction == DMA_TO_DEVICE) {
2188 dma_addr_t dev_addr_tx =
2189 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2190
2191 config_addr = config->dst_addr;
2192 if (dev_addr_tx)
2193 dev_dbg(d40c->base->dev,
2194 "channel has a pre-wired TX address %08x "
2195 "overriding with %08x\n",
2196 dev_addr_tx, config_addr);
2197 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2198 dev_dbg(d40c->base->dev,
2199 "channel was not configured for memory "
2200 "to peripheral transfer (%d) overriding\n",
2201 cfg->dir);
2202 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2203
2204 config_addr_width = config->dst_addr_width;
2205 config_maxburst = config->dst_maxburst;
2206
2207 } else {
2208 dev_err(d40c->base->dev,
2209 "unrecognized channel direction %d\n",
2210 config->direction);
2211 return;
2212 }
2213
2214 switch (config_addr_width) {
2215 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2216 addr_width = STEDMA40_BYTE_WIDTH;
2217 break;
2218 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2219 addr_width = STEDMA40_HALFWORD_WIDTH;
2220 break;
2221 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2222 addr_width = STEDMA40_WORD_WIDTH;
2223 break;
2224 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2225 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2226 break;
2227 default:
2228 dev_err(d40c->base->dev,
2229 "illegal peripheral address width "
2230 "requested (%d)\n",
 2231 config_addr_width);
2232 return;
2233 }
2234
2235 if (config_maxburst >= 16)
2236 psize = STEDMA40_PSIZE_LOG_16;
2237 else if (config_maxburst >= 8)
2238 psize = STEDMA40_PSIZE_LOG_8;
2239 else if (config_maxburst >= 4)
2240 psize = STEDMA40_PSIZE_LOG_4;
2241 else
2242 psize = STEDMA40_PSIZE_LOG_1;
2243
2244 /* Set up all the endpoint configs */
2245 cfg->src_info.data_width = addr_width;
2246 cfg->src_info.psize = psize;
2247 cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
2248 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2249 cfg->dst_info.data_width = addr_width;
2250 cfg->dst_info.psize = psize;
2251 cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
2252 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2253
2254 /* These settings will take precedence later */
2255 d40c->runtime_addr = config_addr;
2256 d40c->runtime_direction = config->direction;
2257 dev_dbg(d40c->base->dev,
2258 "configured channel %s for %s, data width %d, "
2259 "maxburst %d bytes, LE, no flow control\n",
2260 dma_chan_name(chan),
2261 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2262 config_addr_width,
2263 config_maxburst);
2264}
2265
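/*
 * dmaengine device_control entry point: terminate, pause, resume and slave
 * configuration are handled here; any other command returns -ENXIO.
 */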
Linus Walleij05827632010-05-17 16:30:42 -07002266static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2267 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002268{
2269 unsigned long flags;
2270 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2271
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002272 if (d40c->phy_chan == NULL) {
2273 dev_err(&d40c->chan.dev->device,
2274 "[%s] Channel is not allocated!\n", __func__);
2275 return -EINVAL;
2276 }
2277
Linus Walleij8d318a52010-03-30 15:33:42 +02002278 switch (cmd) {
2279 case DMA_TERMINATE_ALL:
2280 spin_lock_irqsave(&d40c->lock, flags);
2281 d40_term_all(d40c);
2282 spin_unlock_irqrestore(&d40c->lock, flags);
2283 return 0;
2284 case DMA_PAUSE:
2285 return d40_pause(chan);
2286 case DMA_RESUME:
2287 return d40_resume(chan);
Linus Walleij95e14002010-08-04 13:37:45 +02002288 case DMA_SLAVE_CONFIG:
2289 d40_set_runtime_config(chan,
2290 (struct dma_slave_config *) arg);
2291 return 0;
2292 default:
2293 break;
Linus Walleij8d318a52010-03-30 15:33:42 +02002294 }
2295
2296 /* Other commands are unimplemented */
2297 return -ENXIO;
2298}
2299
2300/* Initialization functions */
2301
2302static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2303 struct d40_chan *chans, int offset,
2304 int num_chans)
2305{
2306 int i = 0;
2307 struct d40_chan *d40c;
2308
2309 INIT_LIST_HEAD(&dma->channels);
2310
2311 for (i = offset; i < offset + num_chans; i++) {
2312 d40c = &chans[i];
2313 d40c->base = base;
2314 d40c->chan.device = dma;
2315
Linus Walleij8d318a52010-03-30 15:33:42 +02002316 spin_lock_init(&d40c->lock);
2317
2318 d40c->log_num = D40_PHY_CHAN;
2319
Linus Walleij8d318a52010-03-30 15:33:42 +02002320 INIT_LIST_HEAD(&d40c->active);
2321 INIT_LIST_HEAD(&d40c->queue);
2322 INIT_LIST_HEAD(&d40c->client);
2323
Linus Walleij8d318a52010-03-30 15:33:42 +02002324 tasklet_init(&d40c->tasklet, dma_tasklet,
2325 (unsigned long) d40c);
2326
2327 list_add_tail(&d40c->chan.device_node,
2328 &dma->channels);
2329 }
2330}
2331
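/*
 * Register the three dmaengine devices: slave-capable logical channels,
 * memcpy-only logical channels, and physical channels capable of both.
 */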
2332static int __init d40_dmaengine_init(struct d40_base *base,
2333 int num_reserved_chans)
2334{
 2335 int err;
2336
2337 d40_chan_init(base, &base->dma_slave, base->log_chans,
2338 0, base->num_log_chans);
2339
2340 dma_cap_zero(base->dma_slave.cap_mask);
2341 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2342
2343 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2344 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2345 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2346 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2347 base->dma_slave.device_tx_status = d40_tx_status;
2348 base->dma_slave.device_issue_pending = d40_issue_pending;
2349 base->dma_slave.device_control = d40_control;
2350 base->dma_slave.dev = base->dev;
2351
2352 err = dma_async_device_register(&base->dma_slave);
2353
2354 if (err) {
2355 dev_err(base->dev,
2356 "[%s] Failed to register slave channels\n",
2357 __func__);
2358 goto failure1;
2359 }
2360
2361 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2362 base->num_log_chans, base->plat_data->memcpy_len);
2363
2364 dma_cap_zero(base->dma_memcpy.cap_mask);
2365 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2366
2367 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2368 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2369 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2370 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2371 base->dma_memcpy.device_tx_status = d40_tx_status;
2372 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2373 base->dma_memcpy.device_control = d40_control;
2374 base->dma_memcpy.dev = base->dev;
2375 /*
 2376 * This controller can only access addresses at even
 2377 * 32-bit boundaries, i.e. 2^2 alignment.
2378 */
2379 base->dma_memcpy.copy_align = 2;
2380
2381 err = dma_async_device_register(&base->dma_memcpy);
2382
2383 if (err) {
2384 dev_err(base->dev,
2385 "[%s] Failed to regsiter memcpy only channels\n",
2386 __func__);
2387 goto failure2;
2388 }
2389
2390 d40_chan_init(base, &base->dma_both, base->phy_chans,
2391 0, num_reserved_chans);
2392
2393 dma_cap_zero(base->dma_both.cap_mask);
2394 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2395 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2396
2397 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2398 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2399 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2400 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2401 base->dma_both.device_tx_status = d40_tx_status;
2402 base->dma_both.device_issue_pending = d40_issue_pending;
2403 base->dma_both.device_control = d40_control;
2404 base->dma_both.dev = base->dev;
2405 base->dma_both.copy_align = 2;
2406 err = dma_async_device_register(&base->dma_both);
2407
2408 if (err) {
2409 dev_err(base->dev,
2410 "[%s] Failed to register logical and physical capable channels\n",
2411 __func__);
2412 goto failure3;
2413 }
2414 return 0;
2415failure3:
2416 dma_async_device_unregister(&base->dma_memcpy);
2417failure2:
2418 dma_async_device_unregister(&base->dma_slave);
2419failure1:
2420 return err;
2421}
2422
2423/* Initialization functions. */
2424
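/*
 * Work out which physical channels are available by reading the PRSME/PRSMO
 * security registers and honoring the platform's disabled-channel list.
 */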
2425static int __init d40_phy_res_init(struct d40_base *base)
2426{
2427 int i;
2428 int num_phy_chans_avail = 0;
2429 u32 val[2];
2430 int odd_even_bit = -2;
2431
2432 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2433 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2434
2435 for (i = 0; i < base->num_phy_chans; i++) {
2436 base->phy_res[i].num = i;
2437 odd_even_bit += 2 * ((i % 2) == 0);
2438 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2439 /* Mark security only channels as occupied */
2440 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2441 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2442 } else {
2443 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2444 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2445 num_phy_chans_avail++;
2446 }
2447 spin_lock_init(&base->phy_res[i].lock);
2448 }
Jonas Aaberg6b7acd82010-06-20 21:26:59 +00002449
2450 /* Mark disabled channels as occupied */
2451 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2452 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2453 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2454 num_phy_chans_avail--;
2455 }
2456
Linus Walleij8d318a52010-03-30 15:33:42 +02002457 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2458 num_phy_chans_avail, base->num_phy_chans);
2459
2460 /* Verify settings extended vs standard */
2461 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2462
2463 for (i = 0; i < base->num_phy_chans; i++) {
2464
2465 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2466 (val[0] & 0x3) != 1)
2467 dev_info(base->dev,
2468 "[%s] INFO: channel %d is misconfigured (%d)\n",
2469 __func__, i, val[0] & 0x3);
2470
2471 val[0] = val[0] >> 2;
2472 }
2473
2474 return num_phy_chans_avail;
2475}
2476
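/*
 * Map the controller, check the peripheral and PrimeCell IDs, read the
 * silicon revision and allocate the d40_base bookkeeping sized after the
 * detected number of channels.
 */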
2477static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2478{
2479 static const struct d40_reg_val dma_id_regs[] = {
2480 /* Peripheral Id */
2481 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2482 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2483 /*
 2484 * D40_DREG_PERIPHID2 depends on the HW revision:
2485 * MOP500/HREF ED has 0x0008,
2486 * ? has 0x0018,
2487 * HREF V1 has 0x0028
2488 */
2489 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2490
2491 /* PCell Id */
2492 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2493 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2494 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2495 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2496 };
2497 struct stedma40_platform_data *plat_data;
2498 struct clk *clk = NULL;
2499 void __iomem *virtbase = NULL;
2500 struct resource *res = NULL;
2501 struct d40_base *base = NULL;
2502 int num_log_chans = 0;
2503 int num_phy_chans;
2504 int i;
Linus Walleijf4185592010-06-22 18:06:42 -07002505 u32 val;
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002506 u32 rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02002507
2508 clk = clk_get(&pdev->dev, NULL);
2509
2510 if (IS_ERR(clk)) {
2511 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2512 __func__);
2513 goto failure;
2514 }
2515
2516 clk_enable(clk);
2517
2518 /* Get IO for DMAC base address */
2519 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2520 if (!res)
2521 goto failure;
2522
2523 if (request_mem_region(res->start, resource_size(res),
2524 D40_NAME " I/O base") == NULL)
2525 goto failure;
2526
2527 virtbase = ioremap(res->start, resource_size(res));
2528 if (!virtbase)
2529 goto failure;
2530
2531 /* HW version check */
2532 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2533 if (dma_id_regs[i].val !=
2534 readl(virtbase + dma_id_regs[i].reg)) {
2535 dev_err(&pdev->dev,
2536 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2537 __func__,
2538 dma_id_regs[i].val,
2539 dma_id_regs[i].reg,
2540 readl(virtbase + dma_id_regs[i].reg));
2541 goto failure;
2542 }
2543 }
2544
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002545 /* Get silicon revision and designer */
Linus Walleijf4185592010-06-22 18:06:42 -07002546 val = readl(virtbase + D40_DREG_PERIPHID2);
Linus Walleij8d318a52010-03-30 15:33:42 +02002547
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002548 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2549 D40_HW_DESIGNER) {
Linus Walleij8d318a52010-03-30 15:33:42 +02002550 dev_err(&pdev->dev,
2551 "[%s] Unknown designer! Got %x wanted %x\n",
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002552 __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2553 D40_HW_DESIGNER);
Linus Walleij8d318a52010-03-30 15:33:42 +02002554 goto failure;
2555 }
2556
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002557 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
2558 D40_DREG_PERIPHID2_REV_POS;
2559
Linus Walleij8d318a52010-03-30 15:33:42 +02002560 /* The number of physical channels on this HW */
2561 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2562
2563 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002564 rev, res->start);
Linus Walleij8d318a52010-03-30 15:33:42 +02002565
2566 plat_data = pdev->dev.platform_data;
2567
2568 /* Count the number of logical channels in use */
2569 for (i = 0; i < plat_data->dev_len; i++)
2570 if (plat_data->dev_rx[i] != 0)
2571 num_log_chans++;
2572
2573 for (i = 0; i < plat_data->dev_len; i++)
2574 if (plat_data->dev_tx[i] != 0)
2575 num_log_chans++;
2576
2577 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2578 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2579 sizeof(struct d40_chan), GFP_KERNEL);
2580
2581 if (base == NULL) {
2582 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2583 goto failure;
2584 }
2585
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002586 base->rev = rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02002587 base->clk = clk;
2588 base->num_phy_chans = num_phy_chans;
2589 base->num_log_chans = num_log_chans;
2590 base->phy_start = res->start;
2591 base->phy_size = resource_size(res);
2592 base->virtbase = virtbase;
2593 base->plat_data = plat_data;
2594 base->dev = &pdev->dev;
2595 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2596 base->log_chans = &base->phy_chans[num_phy_chans];
2597
2598 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2599 GFP_KERNEL);
2600 if (!base->phy_res)
2601 goto failure;
2602
2603 base->lookup_phy_chans = kzalloc(num_phy_chans *
2604 sizeof(struct d40_chan *),
2605 GFP_KERNEL);
2606 if (!base->lookup_phy_chans)
2607 goto failure;
2608
2609 if (num_log_chans + plat_data->memcpy_len) {
2610 /*
 2611 * The maximum number of logical channels is the number of
 2612 * event lines for all src and dst devices.
2613 */
2614 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2615 sizeof(struct d40_chan *),
2616 GFP_KERNEL);
2617 if (!base->lookup_log_chans)
2618 goto failure;
2619 }
Jonas Aaberg698e4732010-08-09 12:08:56 +00002620
2621 base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2622 sizeof(struct d40_desc *) *
2623 D40_LCLA_LINK_PER_EVENT_GRP,
Linus Walleij8d318a52010-03-30 15:33:42 +02002624 GFP_KERNEL);
2625 if (!base->lcla_pool.alloc_map)
2626 goto failure;
2627
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002628 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2629 0, SLAB_HWCACHE_ALIGN,
2630 NULL);
2631 if (base->desc_slab == NULL)
2632 goto failure;
2633
Linus Walleij8d318a52010-03-30 15:33:42 +02002634 return base;
2635
2636failure:
2637 if (clk) {
2638 clk_disable(clk);
2639 clk_put(clk);
2640 }
2641 if (virtbase)
2642 iounmap(virtbase);
2643 if (res)
2644 release_mem_region(res->start,
2645 resource_size(res));
2648
2649 if (base) {
2650 kfree(base->lcla_pool.alloc_map);
2651 kfree(base->lookup_log_chans);
2652 kfree(base->lookup_phy_chans);
2653 kfree(base->phy_res);
2654 kfree(base);
2655 }
2656
2657 return NULL;
2658}
2659
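/*
 * Write the static register defaults, then unmask and clear interrupts for
 * the channels this driver owns and start those channels in physical mode.
 */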
2660static void __init d40_hw_init(struct d40_base *base)
2661{
2662
2663 static const struct d40_reg_val dma_init_reg[] = {
2664 /* Clock every part of the DMA block from start */
2665 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2666
2667 /* Interrupts on all logical channels */
2668 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2669 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2670 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2671 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2672 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2673 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2674 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2675 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2676 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2677 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2678 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2679 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2680 };
2681 int i;
2682 u32 prmseo[2] = {0, 0};
2683 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2684 u32 pcmis = 0;
2685 u32 pcicr = 0;
2686
2687 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2688 writel(dma_init_reg[i].val,
2689 base->virtbase + dma_init_reg[i].reg);
2690
2691 /* Configure all our dma channels to default settings */
2692 for (i = 0; i < base->num_phy_chans; i++) {
2693
2694 activeo[i % 2] = activeo[i % 2] << 2;
2695
2696 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2697 == D40_ALLOC_PHY) {
2698 activeo[i % 2] |= 3;
2699 continue;
2700 }
2701
2702 /* Enable interrupt # */
2703 pcmis = (pcmis << 1) | 1;
2704
2705 /* Clear interrupt # */
2706 pcicr = (pcicr << 1) | 1;
2707
2708 /* Set channel to physical mode */
2709 prmseo[i % 2] = prmseo[i % 2] << 2;
2710 prmseo[i % 2] |= 1;
2711
2712 }
2713
2714 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2715 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2716 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2717 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2718
2719 /* Write which interrupt to enable */
2720 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2721
2722 /* Write which interrupt to clear */
2723 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2724
2725}
2726
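/*
 * Allocate the LCLA area. The hardware requires 256 KiB (2^18) alignment,
 * so pages are allocated repeatedly until an aligned block is found; as a
 * last resort a larger buffer is allocated and aligned by hand.
 */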
Linus Walleij508849a2010-06-20 21:26:07 +00002727static int __init d40_lcla_allocate(struct d40_base *base)
2728{
2729 unsigned long *page_list;
2730 int i, j;
2731 int ret = 0;
2732
2733 /*
 2734 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
 2735 * To fulfill this hardware requirement without wasting 256 KB,
 2736 * we allocate pages until we get an aligned one.
2737 */
2738 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2739 GFP_KERNEL);
2740
2741 if (!page_list) {
2742 ret = -ENOMEM;
2743 goto failure;
2744 }
2745
2746 /* Calculating how many pages that are required */
2747 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2748
2749 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2750 page_list[i] = __get_free_pages(GFP_KERNEL,
2751 base->lcla_pool.pages);
2752 if (!page_list[i]) {
2753
2754 dev_err(base->dev,
2755 "[%s] Failed to allocate %d pages.\n",
2756 __func__, base->lcla_pool.pages);
2757
2758 for (j = 0; j < i; j++)
2759 free_pages(page_list[j], base->lcla_pool.pages);
2760 goto failure;
2761 }
2762
2763 if ((virt_to_phys((void *)page_list[i]) &
2764 (LCLA_ALIGNMENT - 1)) == 0)
2765 break;
2766 }
2767
2768 for (j = 0; j < i; j++)
2769 free_pages(page_list[j], base->lcla_pool.pages);
2770
2771 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2772 base->lcla_pool.base = (void *)page_list[i];
2773 } else {
Jonas Aaberg767a9672010-08-09 12:08:34 +00002774 /*
 2775 * After many attempts with no success in finding the correct
 2776 * alignment, fall back to allocating a big buffer.
2777 */
Linus Walleij508849a2010-06-20 21:26:07 +00002778 dev_warn(base->dev,
2779 "[%s] Failed to get %d pages @ 18 bit align.\n",
2780 __func__, base->lcla_pool.pages);
2781 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2782 base->num_phy_chans +
2783 LCLA_ALIGNMENT,
2784 GFP_KERNEL);
2785 if (!base->lcla_pool.base_unaligned) {
2786 ret = -ENOMEM;
2787 goto failure;
2788 }
2789
2790 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2791 LCLA_ALIGNMENT);
2792 }
2793
2794 writel(virt_to_phys(base->lcla_pool.base),
2795 base->virtbase + D40_DREG_LCLA);
2796failure:
2797 kfree(page_list);
2798 return ret;
2799}
2800
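/*
 * Probe: detect the hardware, claim the LCPA and LCLA areas, request the
 * interrupt and register the dmaengine devices.
 */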
Linus Walleij8d318a52010-03-30 15:33:42 +02002801static int __init d40_probe(struct platform_device *pdev)
2802{
2803 int err;
2804 int ret = -ENOENT;
2805 struct d40_base *base;
2806 struct resource *res = NULL;
2807 int num_reserved_chans;
2808 u32 val;
2809
2810 base = d40_hw_detect_init(pdev);
2811
2812 if (!base)
2813 goto failure;
2814
2815 num_reserved_chans = d40_phy_res_init(base);
2816
2817 platform_set_drvdata(pdev, base);
2818
2819 spin_lock_init(&base->interrupt_lock);
2820 spin_lock_init(&base->execmd_lock);
2821
2822 /* Get IO for logical channel parameter address */
2823 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2824 if (!res) {
2825 ret = -ENOENT;
2826 dev_err(&pdev->dev,
2827 "[%s] No \"lcpa\" memory resource\n",
2828 __func__);
2829 goto failure;
2830 }
2831 base->lcpa_size = resource_size(res);
2832 base->phy_lcpa = res->start;
2833
2834 if (request_mem_region(res->start, resource_size(res),
2835 D40_NAME " I/O lcpa") == NULL) {
2836 ret = -EBUSY;
2837 dev_err(&pdev->dev,
2838 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2839 __func__, res->start, res->end);
2840 goto failure;
2841 }
2842
2843 /* We make use of ESRAM memory for this. */
2844 val = readl(base->virtbase + D40_DREG_LCPA);
2845 if (res->start != val && val != 0) {
2846 dev_warn(&pdev->dev,
2847 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2848 __func__, val, res->start);
2849 } else
2850 writel(res->start, base->virtbase + D40_DREG_LCPA);
2851
2852 base->lcpa_base = ioremap(res->start, resource_size(res));
2853 if (!base->lcpa_base) {
2854 ret = -ENOMEM;
2855 dev_err(&pdev->dev,
2856 "[%s] Failed to ioremap LCPA region\n",
2857 __func__);
2858 goto failure;
2859 }
Linus Walleij508849a2010-06-20 21:26:07 +00002860
2861 ret = d40_lcla_allocate(base);
2862 if (ret) {
2863 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
Linus Walleij8d318a52010-03-30 15:33:42 +02002864 __func__);
2865 goto failure;
2866 }
2867
Linus Walleij8d318a52010-03-30 15:33:42 +02002868 spin_lock_init(&base->lcla_pool.lock);
2869
Linus Walleij8d318a52010-03-30 15:33:42 +02002870 base->irq = platform_get_irq(pdev, 0);
2871
2872 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2873
2874 if (ret) {
2875 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2876 goto failure;
2877 }
2878
2879 err = d40_dmaengine_init(base, num_reserved_chans);
2880 if (err)
2881 goto failure;
2882
2883 d40_hw_init(base);
2884
2885 dev_info(base->dev, "initialized\n");
2886 return 0;
2887
2888failure:
2889 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002890 if (base->desc_slab)
2891 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002892 if (base->virtbase)
2893 iounmap(base->virtbase);
Linus Walleij508849a2010-06-20 21:26:07 +00002894 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2895 free_pages((unsigned long)base->lcla_pool.base,
2896 base->lcla_pool.pages);
Jonas Aaberg767a9672010-08-09 12:08:34 +00002897
2898 kfree(base->lcla_pool.base_unaligned);
2899
Linus Walleij8d318a52010-03-30 15:33:42 +02002900 if (base->phy_lcpa)
2901 release_mem_region(base->phy_lcpa,
2902 base->lcpa_size);
2903 if (base->phy_start)
2904 release_mem_region(base->phy_start,
2905 base->phy_size);
2906 if (base->clk) {
2907 clk_disable(base->clk);
2908 clk_put(base->clk);
2909 }
2910
2911 kfree(base->lcla_pool.alloc_map);
2912 kfree(base->lookup_log_chans);
2913 kfree(base->lookup_phy_chans);
2914 kfree(base->phy_res);
2915 kfree(base);
2916 }
2917
2918 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2919 return ret;
2920}
2921
2922static struct platform_driver d40_driver = {
2923 .driver = {
2924 .owner = THIS_MODULE,
2925 .name = D40_NAME,
2926 },
2927};
2928
2929int __init stedma40_init(void)
2930{
2931 return platform_driver_probe(&d40_driver, d40_probe);
2932}
2933arch_initcall(stedma40_init);